refactor: create "files" package (#1626)

# Description

Please include a summary of the changes and the related issue, along with the
relevant motivation and context.

## Checklist before requesting a review

Please delete options that are not relevant.

- [ ] My code follows the style guidelines of this project
- [ ] I have performed a self-review of my code
- [ ] I have commented hard-to-understand areas
- [ ] I have added tests that prove my fix is effective or that my feature
works (where practical)
- [ ] New and existing unit tests pass locally with my changes
- [ ] Any dependent changes have been merged

## Screenshots (if appropriate):
This commit is contained in:
Zineb El Bachiri 2023-11-14 09:52:44 +01:00 committed by GitHub
parent 5a3f284785
commit 8f693bc92a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
24 changed files with 25 additions and 27 deletions

View File

@ -10,15 +10,13 @@ from models.databases.supabase.notifications import NotificationUpdatablePropert
from models.files import File
from models.notifications import NotificationsStatusEnum
from models.settings import get_supabase_client
from parsers.github import process_github
from repository.brain.update_brain_last_update_time import (
update_brain_last_update_time,
)
from packages.files.parsers.github import process_github
from packages.files.processors import filter_file
from repository.brain.update_brain_last_update_time import update_brain_last_update_time
from repository.notification.update_notification import update_notification_by_id
from repository.onboarding.remove_onboarding_more_than_x_days import (
remove_onboarding_more_than_x_days,
)
from utils.processors import filter_file
CELERY_BROKER_URL = os.getenv("CELERY_BROKER_URL", "")
CELEBRY_BROKER_QUEUE_NAME = os.getenv("CELEBRY_BROKER_QUEUE_NAME", "quivr")

View File

@ -9,8 +9,8 @@ from logger import get_logger
from models.brains import Brain
from models.databases.supabase.supabase import SupabaseDB
from models.settings import get_supabase_db
from packages.files.file import compute_sha1_from_file
from pydantic import BaseModel
from utils.file import compute_sha1_from_file
logger = get_logger(__name__)

View File

@ -5,9 +5,8 @@ import time
import openai
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from models import File, get_documents_vector_store
from utils.file import compute_sha1_from_content
from packages.files.file import compute_sha1_from_content
async def process_audio(

View File

@ -5,7 +5,7 @@ from langchain.document_loaders import GitLoader
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from models import Brain, File
from utils.file import compute_sha1_from_content
from packages.files.file import compute_sha1_from_content
from utils.vectors import Neurons

View File

@ -1,21 +1,21 @@
from models.brains import Brain
from models.files import File
from parsers.audio import process_audio
from parsers.code_python import process_python
from parsers.csv import process_csv
from parsers.docx import process_docx
from parsers.epub import process_epub
from parsers.html import process_html
from parsers.markdown import process_markdown
from parsers.notebook import process_ipnyb
from parsers.odt import process_odt
from parsers.pdf import process_pdf
from parsers.powerpoint import process_powerpoint
from parsers.telegram import process_telegram
from parsers.txt import process_txt
from parsers.xlsx import process_xlsx
from repository.brain.get_brain_by_id import get_brain_by_id
from .parsers.audio import process_audio
from .parsers.code_python import process_python
from .parsers.csv import process_csv
from .parsers.docx import process_docx
from .parsers.epub import process_epub
from .parsers.html import process_html
from .parsers.markdown import process_markdown
from .parsers.notebook import process_ipnyb
from .parsers.odt import process_odt
from .parsers.pdf import process_pdf
from .parsers.powerpoint import process_powerpoint
from .parsers.telegram import process_telegram
from .parsers.txt import process_txt
from .parsers.xlsx import process_xlsx
file_processors = {
".txt": process_txt,
".csv": process_csv,
@ -46,8 +46,9 @@ def create_response(message, type):
return {"message": message, "type": type}
# TODO: Move filter_file to a file service to avoid circular imports from models/files.py for File class
async def filter_file(
file: File,
file,
enable_summarization: bool,
brain_id,
openai_api_key,

View File

@ -10,9 +10,9 @@ from models import Brain, UserIdentity, UserUsage
from models.databases.supabase.knowledge import CreateKnowledgeProperties
from models.databases.supabase.notifications import CreateNotificationProperties
from models.notifications import NotificationsStatusEnum
from packages.files.file import convert_bytes
from repository.knowledge.add_knowledge import add_knowledge
from repository.notification.add_notification import add_notification
from utils.file import convert_bytes
logger = get_logger(__name__)
crawl_router = APIRouter()

View File

@ -10,6 +10,7 @@ from models import Brain, UserIdentity, UserUsage
from models.databases.supabase.knowledge import CreateKnowledgeProperties
from models.databases.supabase.notifications import CreateNotificationProperties
from models.notifications import NotificationsStatusEnum
from packages.files.file import convert_bytes, get_file_size
from repository.brain import get_brain_details
from repository.files.upload_file import upload_file_storage
from repository.knowledge.add_knowledge import add_knowledge
@ -19,7 +20,6 @@ from routes.authorizations.brain_authorization import (
RoleEnum,
validate_brain_authorization,
)
from utils.file import convert_bytes, get_file_size
logger = get_logger(__name__)
upload_router = APIRouter()