Mirror of https://github.com/StanGirard/quivr.git, synced 2024-12-25 04:12:44 +03:00
8af6d61e76
* reorganize import level
* add __init__, reorganize import from __init__
* reorganize import level
* reorganize import level
* fix circular import error by keeping the import deep, as in "from models.settings" (see the sketch below)
* fix the relative import
* restore unwanted staged files
* add backend/venv and backend/.env to gitignore
* clean importing
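A minimal sketch of the circular-import fix described in the last "fix circular import" bullet, assuming hypothetical names: the idea is to import from the deeper models.settings module (and, if needed, defer the import into the function body) so the top-level models package does not have to be fully initialized before this module loads. Module paths, helper names, and the dictionary key below are assumptions for illustration, not code from this repository.

# Hypothetical sketch only: names are assumed, not the actual quivr code.

# Problematic pattern: importing from the top-level package can form a cycle
# if models/__init__.py in turn imports from this module at load time.
# from models import common_dependencies

def get_documents_vector_store():
    # Keeping the import "deep" (models.settings) and inside the function
    # means it only runs when called, after both modules have finished loading.
    from models.settings import common_dependencies  # assumed helper

    commons = common_dependencies()
    return commons["documents_vector_store"]  # assumed key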
41 lines
1.3 KiB
Python
import time

from langchain.schema import Document

from models import Brain, File
from utils.vectors import Neurons


async def process_file(
    file: File,
    loader_class,
    enable_summarization,
    brain_id,
    user_openai_api_key,
):
    dateshort = time.strftime("%Y%m%d")

    # Split the file into documents with the loader class chosen for its file type.
    file.compute_documents(loader_class)

    for doc in file.documents:  # pyright: ignore reportPrivateUsage=none
        metadata = {
            "file_sha1": file.file_sha1,
            "file_size": file.file_size,
            "file_name": file.file_name,
            "chunk_size": file.chunk_size,
            "chunk_overlap": file.chunk_overlap,
            "date": dateshort,
            "summarization": "true" if enable_summarization else "false",
        }
        doc_with_metadata = Document(page_content=doc.page_content, metadata=metadata)

        # Embed the chunk and store it in the vector store.
        neurons = Neurons()
        created_vector = neurons.create_vector(doc_with_metadata, user_openai_api_key)
        # add_usage(stats_db, "embedding", "audio", metadata={"file_name": file_meta_name,"file_type": ".txt", "chunk_size": chunk_size, "chunk_overlap": chunk_overlap})

        created_vector_id = created_vector[0]  # pyright: ignore reportPrivateUsage=none

        # Associate the new vector with the target brain.
        brain = Brain(id=brain_id)
        brain.create_brain_vector(created_vector_id, file.file_sha1)

    return
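For context, a hypothetical call site: a caller elsewhere in the backend would pick a LangChain loader class per file type and await the coroutine from an async route handler. The loader choice and variable names below are assumptions for illustration, not code from this repository.

from langchain.document_loaders import TextLoader  # loader choice assumed for a .txt upload

# Hypothetical invocation (inside an async handler): embed an uploaded text
# file and attach the resulting vectors to a brain.
await process_file(
    file=uploaded_file,              # a models.File wrapping the upload
    loader_class=TextLoader,
    enable_summarization=False,
    brain_id=brain_id,
    user_openai_api_key=user_openai_api_key,
)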