from typing import Any, Optional

from fastapi import UploadFile
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter

# from stats import add_usage

import os
import tempfile
import time

from utils import compute_sha1_from_file, compute_sha1_from_content


async def process_file(vector_store, file: UploadFile, loader_class, file_suffix, stats_db: Optional[Any] = None):
    file_name = file.filename
    dateshort = time.strftime("%Y%m%d")

    # Write the uploaded file to a temporary file on disk so that the
    # langchain loader, which expects a file path, can read it.
    with tempfile.NamedTemporaryFile(delete=False, suffix=file.filename) as tmp_file:
        await file.seek(0)
        content = await file.read()
        file_size = len(content)  # Size of the upload in bytes
        tmp_file.write(content)
        tmp_file.flush()

        loader = loader_class(tmp_file.name)
        documents = loader.load()
        file_sha1 = compute_sha1_from_file(tmp_file.name)

    os.remove(tmp_file.name)

    chunk_size = 500
    chunk_overlap = 0

    text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
        chunk_size=chunk_size, chunk_overlap=chunk_overlap)

    documents = text_splitter.split_documents(documents)

    # Add the document sha1 and other file-level fields as metadata on each chunk
    docs_with_metadata = [
        Document(
            page_content=doc.page_content,
            metadata={
                "file_sha1": file_sha1,
                "file_size": file_size,
                "file_name": file_name,
                "chunk_size": chunk_size,
                "chunk_overlap": chunk_overlap,
                "date": dateshort,
            },
        )
        for doc in documents
    ]

    vector_store.add_documents(docs_with_metadata)
    # if stats_db:
    #     add_usage(stats_db, "embedding", "file", metadata={"file_name": file_name, "file_type": file.filename, "chunk_size": chunk_size, "chunk_overlap": chunk_overlap})
    return


async def file_already_exists(supabase, file):
    # Hash the raw upload and look for an existing document with the same sha1
    file_content = await file.read()
    file_sha1 = compute_sha1_from_content(file_content)
    response = (
        supabase.table("documents")
        .select("id")
        .eq("metadata->>file_sha1", file_sha1)
        .execute()
    )
    return len(response.data) > 0
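
# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal example of how these helpers might be wired into a FastAPI
# upload endpoint. The names `vector_store` and `supabase`, and the choice
# of `TextLoader`, are assumptions made for the example; the real backend
# picks the loader class based on the uploaded file's extension.
#
# from fastapi import FastAPI, UploadFile
# from langchain.document_loaders import TextLoader
#
# app = FastAPI()
#
# @app.post("/upload")
# async def upload_file(file: UploadFile):
#     if await file_already_exists(supabase, file):
#         return {"message": f"{file.filename} already uploaded", "type": "warning"}
#     await process_file(vector_store, file, TextLoader, ".txt")
#     return {"message": f"{file.filename} processed", "type": "success"}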