From 6d1f22a420db320b8cbbdca5771e941d4129b069 Mon Sep 17 00:00:00 2001
From: Evan Carlson <45178375+EvanCarlson@users.noreply.github.com>
Date: Fri, 19 May 2023 14:13:46 -0700
Subject: [PATCH] add support for uploading zip files (#62)

---
 components_keys.py |   4 +
 files.py           | 183 ++++++++++++++++++++++++++++++++++-----------
 loaders/common.py  |   4 +-
 main.py            |   8 +-
 utils.py           |   4 +-
 5 files changed, 152 insertions(+), 51 deletions(-)
 create mode 100644 components_keys.py

diff --git a/components_keys.py b/components_keys.py
new file mode 100644
index 000000000..bcdd110b9
--- /dev/null
+++ b/components_keys.py
@@ -0,0 +1,4 @@
+"""Store streamlit component keys"""
+
+class ComponentsKeys:
+    FILE_UPLOADER = "file_uploader"
diff --git a/files.py b/files.py
index d0f915c8b..0238bdffa 100644
--- a/files.py
+++ b/files.py
@@ -1,21 +1,37 @@
-import streamlit as st
-from streamlit.runtime.uploaded_file_manager import UploadedFile, UploadedFileRec
 import os
+from typing import (
+    Any,
+    Union,
+)
+import zipfile
+import streamlit as st
+from streamlit.runtime.uploaded_file_manager import (
+    UploadedFile,
+    UploadedFileRec,
+    UploadedFileManager,
+)
+from streamlit.runtime.scriptrunner import get_script_run_ctx
+from supabase.client import Client
+from langchain.vectorstores.supabase import SupabaseVectorStore
+from components_keys import ComponentsKeys
 from loaders.audio import process_audio
 from loaders.txt import process_txt
 from loaders.csv import process_csv
 from loaders.markdown import process_markdown
-from loaders.html import process_html
-from utils import compute_sha1_from_content
 from loaders.pdf import process_pdf
-from loaders.html import get_html, create_html_file, delete_tempfile
+from loaders.html import (
+    create_html_file,
+    delete_tempfile,
+    get_html,
+    process_html,
+)
 from loaders.powerpoint import process_powerpoint
 from loaders.docx import process_docx
-import requests
-import re
-import unicodedata
-import tempfile
+from utils import compute_sha1_from_content
+
+ctx = get_script_run_ctx()
+manager = UploadedFileManager()
 
 file_processors = {
     ".txt": process_txt,
     ".csv": process_csv,
@@ -30,17 +46,33 @@ file_processors = {
     ".mpeg": process_audio,
     ".pdf": process_pdf,
     ".html": process_html,
-    ".pptx": process_powerpoint,
-    ".docx": process_docx
+    ".pptx": process_powerpoint,
+    ".docx": process_docx
 }
 
-def file_uploader(supabase, openai_key, vector_store):
+def file_uploader(supabase, vector_store):
+    # Omit zip file support unless `st.secrets.self_hosted` == "true": a zip
+    # file can contain multiple files, so it could otherwise be used to
+    # circumvent the demo's one-file-at-a-time upload limit.
+    accepted_file_extensions = list(file_processors.keys())
     accept_multiple_files = st.secrets.self_hosted == "true"
-    files = st.file_uploader("**Upload a file**", accept_multiple_files=accept_multiple_files, type=list(file_processors.keys()))
+    if accept_multiple_files:
+        accepted_file_extensions += [".zip"]
+
+    files = st.file_uploader(
+        "**Upload a file**",
+        accept_multiple_files=accept_multiple_files,
+        type=accepted_file_extensions,
+        key=ComponentsKeys.FILE_UPLOADER,
+    )
     if st.secrets.self_hosted == "false":
         st.markdown("**In demo mode, the max file size is 1MB**")
     if st.button("Add to Database"):
-        if files is not None:
+        # Single file upload
+        if isinstance(files, UploadedFile):
+            filter_file(files, supabase, vector_store)
+        # Multiple files upload
+        elif isinstance(files, list):
             for file in files:
                 filter_file(file, supabase, vector_store)
 
@@ -49,42 +81,107 @@ def file_already_exists(supabase, file):
     response = supabase.table("documents").select("id").eq("metadata->>file_sha1", file_sha1).execute()
     return len(response.data) > 0
 
+def file_to_uploaded_file(file: Any) -> Union[None, UploadedFile]:
+    """Convert a file to a streamlit `UploadedFile` object.
+
+    This allows us to unzip files and treat them the same way
+    streamlit treats files uploaded through the file uploader.
+
+    Parameters
+    ----------
+    file : Any
+        The file. Can be any file supported by this app.
+
+    Returns
+    -------
+    Union[None, UploadedFile]
+        The file converted to a streamlit `UploadedFile` object.
+        Returns `None` if the script context cannot be grabbed.
+    """
+
+    if ctx is None:
+        print("script context not found, skipping uploading file:", file.name)
+        return
+
+    file_extension = os.path.splitext(file.name)[-1]
+    file_name = file.name
+    file_data = file.read()
+    # The file manager will automatically assign an ID so pass `None`
+    # Reference: https://github.com/streamlit/streamlit/blob/9a6ce804b7977bdc1f18906d1672c45f9a9b3398/lib/streamlit/runtime/uploaded_file_manager.py#LL98C6-L98C6
+    uploaded_file_rec = UploadedFileRec(None, file_name, file_extension, file_data)
+    uploaded_file_rec = manager.add_file(
+        ctx.session_id,
+        ComponentsKeys.FILE_UPLOADER,
+        uploaded_file_rec,
+    )
+    return UploadedFile(uploaded_file_rec)
+
+def filter_zip_file(
+    file: UploadedFile,
+    supabase: Client,
+    vector_store: SupabaseVectorStore,
+) -> None:
+    """Unzip the zip file then filter each unzipped file.
+
+    Parameters
+    ----------
+    file : UploadedFile
+        The uploaded file from the file uploader.
+    supabase : Client
+        The supabase client.
+    vector_store : SupabaseVectorStore
+        The vector store in the database.
+    """
+
+    with zipfile.ZipFile(file, "r") as z:
+        unzipped_files = z.namelist()
+        for unzipped_file in unzipped_files:
+            with z.open(unzipped_file, "r") as f:
+                filter_file(f, supabase, vector_store)
+
 def filter_file(file, supabase, vector_store):
+    # Streamlit file uploads are of type `UploadedFile` which has the
+    # necessary methods and attributes for this app to work.
+    if not isinstance(file, UploadedFile):
+        file = file_to_uploaded_file(file)
+
+    file_extension = os.path.splitext(file.name)[-1]
+    if file_extension == ".zip":
+        filter_zip_file(file, supabase, vector_store)
+        return True
+
     if file_already_exists(supabase, file):
         st.write(f"😎 {file.name} is already in the database.")
         return False
-    elif file.size < 1:
+
+    if file.size < 1:
         st.write(f"💨 {file.name} is empty.")
         return False
-    else:
-        file_extension = os.path.splitext(file.name)[-1]
-        if file_extension in file_processors:
-            if st.secrets.self_hosted == "false":
-                file_processors[file_extension](vector_store, file, stats_db=supabase)
-            else:
-                file_processors[file_extension](vector_store, file, stats_db=None)
-            st.write(f"✅ {file.name} ")
-            return True
+
+    if file_extension in file_processors:
+        if st.secrets.self_hosted == "false":
+            file_processors[file_extension](vector_store, file, stats_db=supabase)
         else:
-            st.write(f"❌ {file.name} is not a valid file type.")
-            return False
+            file_processors[file_extension](vector_store, file, stats_db=None)
+        st.write(f"✅ {file.name} ")
+        return True
 
-def url_uploader(supabase, openai_key, vector_store):
-    
-    url = st.text_area("**Add an url**",placeholder="https://www.quivr.app")
-    button = st.button("Add the URL to the database")
+    st.write(f"❌ {file.name} is not a valid file type.")
+    return False
 
-    if button:
-        if not st.session_state["overused"]:
-            html = get_html(url)
-            if html:
-                st.write(f"Getting content ... {url} ")
-                file, temp_file_path = create_html_file(url, html)
-                ret = filter_file(file, supabase, vector_store)
-                delete_tempfile(temp_file_path, url, ret)
-            else:
-                st.write(f"❌ Failed to access to {url} .")
+def url_uploader(supabase, vector_store):
+    url = st.text_area("**Add a URL**", placeholder="https://www.quivr.app")
+    button = st.button("Add the URL to the database")
+
+    if button:
+        if not st.session_state["overused"]:
+            html = get_html(url)
+            if html:
+                st.write(f"Getting content ... {url} ")
+                file, temp_file_path = create_html_file(url, html)
+                ret = filter_file(file, supabase, vector_store)
+                delete_tempfile(temp_file_path, url, ret)
             else:
-                st.write("You have reached your daily limit. Please come back later or self host the solution.")
-
-
+                st.write(f"❌ Failed to access {url}.")
+        else:
+            st.write("You have reached your daily limit. Please come back later or self host the solution.")
diff --git a/loaders/common.py b/loaders/common.py
index 4d5e0d5ce..70c3aed92 100644
--- a/loaders/common.py
+++ b/loaders/common.py
@@ -9,13 +9,13 @@ from stats import add_usage
 
 def process_file(vector_store, file, loader_class, file_suffix, stats_db=None):
     documents = []
-    file_sha = ""
     file_name = file.name
     file_size = file.size
     if st.secrets.self_hosted == "false":
         if file_size > 1000000:
             st.error("File size is too large. Please upload a file smaller than 1MB or self host.")
             return
+
     dateshort = time.strftime("%Y%m%d")
     with tempfile.NamedTemporaryFile(delete=False, suffix=file_suffix) as tmp_file:
         tmp_file.write(file.getvalue())
@@ -24,6 +24,7 @@ def process_file(vector_store, file, loader_class, file_suffix, stats_db=None):
         loader = loader_class(tmp_file.name)
         documents = loader.load()
         file_sha1 = compute_sha1_from_file(tmp_file.name)
+
     os.remove(tmp_file.name)
 
     chunk_size = st.session_state['chunk_size']
@@ -39,4 +40,3 @@ def process_file(vector_store, file, loader_class, file_suffix, stats_db=None):
     vector_store.add_documents(docs_with_metadata)
     if stats_db:
         add_usage(stats_db, "embedding", "file", metadata={"file_name": file_name,"file_type": file_suffix, "chunk_size": chunk_size, "chunk_overlap": chunk_overlap})
-    return
\ No newline at end of file
diff --git a/main.py b/main.py
index f6aafd067..816bb5055 100644
--- a/main.py
+++ b/main.py
@@ -88,15 +88,15 @@ if user_choice == 'Add Knowledge':
     col1, col2 = st.columns(2)
     with col1:
-        file_uploader(supabase, openai_api_key, vector_store)
+        file_uploader(supabase, vector_store)
     with col2:
-        url_uploader(supabase, openai_api_key, vector_store)
+        url_uploader(supabase, vector_store)
 elif user_choice == 'Chat with your Brain':
     # Display model and temperature selection only when asking questions
     st.sidebar.title("Configuration")
     st.sidebar.markdown(
         "Choose your model and temperature for asking questions.")
-    if st.secrets.self_hosted != "false":
+    if self_hosted != "false":
         st.session_state['model'] = st.sidebar.selectbox(
             "Select Model", models, index=(models).index(st.session_state['model']))
     else:
@@ -120,4 +120,4 @@ elif user_choice == 'Explore':
     st.sidebar.title("Configuration")
     view_document(supabase)
 
-st.markdown("---\n\n")
+st.markdown("---\n\n")
\ No newline at end of file
diff --git a/utils.py b/utils.py
index 7075d3cf8..29a207e83 100644
--- a/utils.py
+++ b/utils.py
@@ -3,9 +3,9 @@ import hashlib
 def compute_sha1_from_file(file_path):
     with open(file_path, "rb") as file:
         bytes = file.read()
-        readable_hash = hashlib.sha1(bytes).hexdigest()
+        readable_hash = compute_sha1_from_content(bytes)
         return readable_hash
 
 def compute_sha1_from_content(content):
     readable_hash = hashlib.sha1(content).hexdigest()
-    return readable_hash
\ No newline at end of file
+    return readable_hash
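
The zip path in this patch is plain stdlib `zipfile`: `filter_zip_file` lists the archive members with `namelist()`, opens each one, and hands the readable member object to `filter_file`, which then converts it into a streamlit `UploadedFile` via `file_to_uploaded_file`. Below is a minimal standalone sketch of the same walk; `handle_member` and the archive path `example.zip` are hypothetical stand-ins for `filter_file` and the uploaded archive.

import zipfile


def handle_member(member) -> None:
    # Hypothetical stand-in for filter_file. A ZipExtFile is readable
    # and carries a .name attribute, which is all file_to_uploaded_file
    # needs to build an UploadedFileRec from it.
    print(member.name, len(member.read()), "bytes")


def walk_zip(path: str) -> None:
    with zipfile.ZipFile(path, "r") as z:
        for name in z.namelist():
            # z.open yields each member as a file-like ZipExtFile.
            with z.open(name, "r") as member:
                handle_member(member)


walk_zip("example.zip")  # hypothetical archive path

Note that `UploadedFileManager.add_file` and the `UploadedFile` constructor used in `file_to_uploaded_file` are streamlit internals, not public API; the pinned GitHub link in the patch points at the exact streamlit revision this was written against.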
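Deduplication rests on the SHA-1 helpers touched in `utils.py`: identical bytes hash to the same `file_sha1`, which `file_already_exists` looks up under `metadata->>file_sha1` in the `documents` table, so re-uploading the same content (zipped or not) is skipped. A minimal sketch of that check, with an in-memory set as a hypothetical stand-in for the database lookup:

import hashlib


def compute_sha1_from_content(content: bytes) -> str:
    # Same helper as utils.py: hex SHA-1 digest of the raw bytes.
    return hashlib.sha1(content).hexdigest()


# Hypothetical stand-in for the documents table consulted by
# file_already_exists.
seen = set()


def already_ingested(content: bytes) -> bool:
    sha1 = compute_sha1_from_content(content)
    if sha1 in seen:
        return True
    seen.add(sha1)
    return False


print(already_ingested(b"hello"))  # False: first upload gets ingested
print(already_ingested(b"hello"))  # True: identical bytes are skipped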