# quivr/backend/parsers/github.py
# Parser that clones a GitHub repository and ingests its files into a
# brain's vector store.
import os
import shutil
import time

from langchain.document_loaders import GitLoader
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter

from models.brains import Brain
from models.files import File
from models.settings import CommonsDep
from utils.file import compute_sha1_from_content
from utils.vectors import Neurons

async def process_github(commons: CommonsDep, repo, enable_summarization, brain_id, user_openai_api_key):
    """Clone a GitHub repository, split its files into chunks, and store a
    vector for each new chunk under the given brain.

    Args:
        commons: Shared resources (DB clients, etc.) passed to ``Neurons``.
        repo: Clone URL of the repository to ingest.
        enable_summarization: Recorded in each chunk's metadata as
            ``"summarization": "true"/"false"`` (no summarization is done here).
        brain_id: Brain to associate the created vectors with.
        user_openai_api_key: Per-user OpenAI key forwarded to vector creation.

    Returns:
        A dict with a success ``message`` (reporting the chunk count) and
        ``type: "success"``.
    """
    # Clone into a unique temp dir so concurrent ingestions don't collide.
    random_dir_name = os.urandom(16).hex()
    repo_path = "/tmp/" + random_dir_name
    dateshort = time.strftime("%Y%m%d")
    loader = GitLoader(
        clone_url=repo,
        repo_path=repo_path,
    )
    try:
        documents = loader.load()
    finally:
        # Always remove the clone, even if loading fails (the original
        # `os.system("rm -rf ...")` only ran on success and shelled out).
        shutil.rmtree(repo_path, ignore_errors=True)

    chunk_size = 500
    chunk_overlap = 0
    text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
        chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    documents = text_splitter.split_documents(documents)

    # Binary, lock, and VCS-housekeeping files carry no useful text to embed.
    ignored_file_types = {
        ".pyc", ".png", ".svg", ".env", ".lock", ".gitignore", ".gitmodules",
        ".gitattributes", ".gitkeep", ".git", ".json",
    }
    for doc in documents:
        if doc.metadata["file_type"] in ignored_file_types:
            continue
        # Hash once and reuse — the original computed this twice per chunk.
        file_sha1 = compute_sha1_from_content(doc.page_content.encode("utf-8"))
        metadata = {
            "file_sha1": file_sha1,
            # NOTE(review): `len(...) * 8` looks like a bits-vs-bytes mix-up,
            # but is preserved because stored sizes elsewhere may rely on it.
            "file_size": len(doc.page_content) * 8,
            "file_name": doc.metadata["file_name"],
            "chunk_size": chunk_size,
            "chunk_overlap": chunk_overlap,
            "date": dateshort,
            "summarization": "true" if enable_summarization else "false",
        }
        doc_with_metadata = Document(
            page_content=doc.page_content, metadata=metadata)

        file = File(file_sha1=file_sha1)
        # Skip chunks whose identical content is already stored for this brain.
        exist = file.file_already_exists(brain_id)
        if not exist:
            neurons = Neurons(commons=commons)
            created_vector = neurons.create_vector(doc_with_metadata, user_openai_api_key)
            created_vector_id = created_vector[0]
            # Link the new vector to the brain so it is visible in queries.
            brain = Brain(id=brain_id)
            brain.create_brain_vector(created_vector_id)
            print("Created vector for ", doc.metadata["file_name"])

    return {"message": f"✅ Github with {len(documents)} files has been uploaded.", "type": "success"}