diff --git a/.vscode/settings.json b/.vscode/settings.json
index efb59a1c4..fc2130bb5 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -5,8 +5,6 @@
     "source.fixAll": "explicit",
     "source.unusedImports": "explicit"
   },
-  "python.linting.enabled": true,
-  "python.linting.flake8Enabled": true,
   "python.analysis.extraPaths": [
     "./backend"
   ],
@@ -54,6 +52,5 @@
   ],
   "python.testing.unittestEnabled": false,
   "python.testing.pytestEnabled": true,
-  "python.envFile": "${workspaceFolder}/.env_test",
 }
\ No newline at end of file
diff --git a/backend/Dockerfile.dev b/backend/Dockerfile.dev
index f9cd3f062..6e13eb76e 100644
--- a/backend/Dockerfile.dev
+++ b/backend/Dockerfile.dev
@@ -43,8 +43,6 @@ RUN pip install --upgrade pip
 # Increase timeout to wait for the new installation
 RUN pip install --no-cache-dir -r requirements.txt --timeout 200
 
-RUN if [ "$DEV_MODE" = "true" ]; then pip install --no-cache debugpy --timeout 200; fi
-
 WORKDIR /code
 # Copy the rest of the application
 COPY . .
diff --git a/backend/celery_worker.py b/backend/celery_worker.py
index 990d7d6eb..e272775f2 100644
--- a/backend/celery_worker.py
+++ b/backend/celery_worker.py
@@ -1,7 +1,7 @@
 import asyncio
 import io
 import os
-from datetime import datetime, timedelta, timezone
+from datetime import datetime, timezone
 
 from celery.schedules import crontab
 from celery_config import celery
@@ -191,15 +191,17 @@ def process_integration_brain_sync():
     # only call process_integration_brain_sync_user_brain if more than 1 day has passed since the last sync
     if not integrations:
         return
-    for integration in integrations:
-        print(f"last_synced: {integration.last_synced}")  # Add this line
-        last_synced = datetime.strptime(
-            integration.last_synced, "%Y-%m-%dT%H:%M:%S.%f%z"
-        )
-        if last_synced < time - timedelta(hours=12):
-            process_integration_brain_sync_user_brain.delay(
-                brain_id=integration.brain_id, user_id=integration.user_id
-            )
+    # TODO fix this
+    # for integration in integrations:
+    #     print(f"last_synced: {integration.last_synced}")
+    #     print(f"Integration Name: {integration.name}")
+    #     last_synced = datetime.strptime(
+    #         integration.last_synced, "%Y-%m-%dT%H:%M:%S.%f%z"
+    #     )
+    #     if last_synced < time - timedelta(hours=12) and integration.name == "notion":
+    #         process_integration_brain_sync_user_brain.delay(
+    #             brain_id=integration.brain_id, user_id=integration.user_id
+    #         )
 
 
 celery.conf.beat_schedule = {
diff --git a/backend/logger.py b/backend/logger.py
index 0c7cef91f..79214ed8e 100644
--- a/backend/logger.py
+++ b/backend/logger.py
@@ -6,7 +6,7 @@ from colorlog import (
 )  # You need to install this package: pip install colorlog
 
 
-def get_logger(logger_name, log_level=logging.INFO, log_file="application.log"):
+def get_logger(logger_name, log_level=logging.WARNING, log_file="application.log"):
     logger = logging.getLogger(logger_name)
     logger.setLevel(log_level)
     logger.propagate = False  # Prevent log propagation to avoid double logging
diff --git a/backend/main.py b/backend/main.py
index 2ba513191..279afcb49 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -7,6 +7,7 @@ if __name__ == "__main__":
     load_dotenv()
 
 import sentry_sdk
+import litellm
 from fastapi import FastAPI, HTTPException
 from fastapi.responses import JSONResponse
 from logger import get_logger
@@ -24,20 +25,23 @@ from modules.prompt.controller import prompt_router
 from modules.upload.controller import upload_router
 from modules.user.controller import user_router
 from packages.utils import handle_request_validation_error
-from packages.utils.telemetry import send_telemetry
+from packages.utils.telemetry import maybe_send_telemetry
 from routes.crawl_routes import crawl_router
 from routes.subscription_routes import subscription_router
 from sentry_sdk.integrations.fastapi import FastApiIntegration
 from sentry_sdk.integrations.starlette import StarletteIntegration
+import logging
+
+# Set the logging level for all loggers to WARNING
+logging.basicConfig(level=logging.INFO)
+logging.getLogger("httpx").setLevel(logging.WARNING)
+logging.getLogger("LiteLLM").setLevel(logging.WARNING)
+logging.getLogger("litellm").setLevel(logging.WARNING)
+litellm.set_verbose = False
+
 
 logger = get_logger(__name__)
 
-if os.getenv("DEV_MODE") == "true":
-    import debugpy
-
-    logger.debug("👨‍💻 Running in dev mode")
-    debugpy.listen(("0.0.0.0", 5678))
-
 
 def before_send(event, hint):
     # If this is a transaction event
@@ -100,11 +104,11 @@ if os.getenv("TELEMETRY_ENABLED") == "true":
     logger.info(
         "To disable telemetry, set the TELEMETRY_ENABLED environment variable to false."
     )
-    send_telemetry("booting", {"status": "ok"})
+    maybe_send_telemetry("booting", {"status": "ok"})
 
 if __name__ == "__main__":
     # run main.py to debug backend
     import uvicorn
 
-    uvicorn.run(app, host="0.0.0.0", port=5050)
+    uvicorn.run(app, host="0.0.0.0", port=5050, log_level="warning", access_log=False)
diff --git a/backend/models/databases/supabase/supabase.py b/backend/models/databases/supabase/supabase.py
index f83ba5896..ffe8ed54d 100644
--- a/backend/models/databases/supabase/supabase.py
+++ b/backend/models/databases/supabase/supabase.py
@@ -1,8 +1,5 @@
-from logger import get_logger
 from models.databases.supabase import BrainSubscription, File, UserUsage, Vector
 
-logger = get_logger(__name__)
-
 
 class SupabaseDB(
     UserUsage,
diff --git a/backend/models/databases/supabase/user_usage.py b/backend/models/databases/supabase/user_usage.py
index 8dd3ed980..767d3af9b 100644
--- a/backend/models/databases/supabase/user_usage.py
+++ b/backend/models/databases/supabase/user_usage.py
@@ -158,12 +158,9 @@ class UserUsage(Repository):
             return False, False
 
         except Exception as e:
-            logger.info(matching_customers)
-            logger.error(e)
-            logger.error(
-                "Error while checking if user is a premium user. Stripe needs to be configured."
+            logger.info(
+                "Stripe needs to be configured if you want to have the premium features"
             )
-            logger.error(e)
             return False, True
 
     def get_user_settings(self, user_id):
diff --git a/backend/models/settings.py b/backend/models/settings.py
index 86cff2cb4..f12cfa3bb 100644
--- a/backend/models/settings.py
+++ b/backend/models/settings.py
@@ -1,7 +1,7 @@
 from uuid import UUID
 
 from langchain.embeddings.ollama import OllamaEmbeddings
-from langchain.embeddings.openai import OpenAIEmbeddings
+from langchain_openai import OpenAIEmbeddings
 from logger import get_logger
 from models.databases.supabase.supabase import SupabaseDB
 from posthog import Posthog
diff --git a/backend/modules/brain/controller/brain_routes.py b/backend/modules/brain/controller/brain_routes.py
index 1d4927203..f71a48cdc 100644
--- a/backend/modules/brain/controller/brain_routes.py
+++ b/backend/modules/brain/controller/brain_routes.py
@@ -20,7 +20,7 @@ from modules.brain.service.integration_brain_service import (
 )
 from modules.prompt.service.prompt_service import PromptService
 from modules.user.entity.user_identity import UserIdentity
-from packages.utils.telemetry import send_telemetry
+from packages.utils.telemetry import maybe_send_telemetry
 from repository.brain import get_question_context_from_brain
 
 logger = get_logger(__name__)
@@ -109,12 +109,11 @@ async def create_new_brain(
             status_code=429,
             detail=f"Maximum number of brains reached ({user_settings.get('max_brains', 5)}).",
         )
-    send_telemetry("create_brain", {"brain_name": brain.name})
+    maybe_send_telemetry("create_brain", {"brain_name": brain.name})
     new_brain = brain_service.create_brain(
         brain=brain,
         user_id=current_user.id,
     )
-    logger.info(f"Creating default brain for user {current_user.id}.")
     brain_user_service.create_brain_user(
         user_id=current_user.id,
         brain_id=new_brain.brain_id,
diff --git a/backend/modules/chat/controller/chat_routes.py b/backend/modules/chat/controller/chat_routes.py
index 9a0433244..b4ad230d5 100644
--- a/backend/modules/chat/controller/chat_routes.py
+++ b/backend/modules/chat/controller/chat_routes.py
@@ -4,7 +4,7 @@ from uuid import UUID
 from fastapi import APIRouter, Depends, HTTPException, Query, Request
 from fastapi.responses import StreamingResponse
 from langchain.embeddings.ollama import OllamaEmbeddings
-from langchain.embeddings.openai import OpenAIEmbeddings
+from langchain_openai import OpenAIEmbeddings
 from logger import get_logger
 from middlewares.auth import AuthBearer, get_current_user
 from models.settings import BrainSettings, get_supabase_client
@@ -22,7 +22,7 @@ from modules.chat.entity.chat import Chat
 from modules.chat.service.chat_service import ChatService
 from modules.notification.service.notification_service import NotificationService
 from modules.user.entity.user_identity import UserIdentity
-from packages.utils.telemetry import send_telemetry
+from packages.utils.telemetry import maybe_send_telemetry
 from vectorstore.supabase import CustomSupabaseVectorStore
 
 logger = get_logger(__name__)
@@ -78,7 +78,7 @@ def get_answer_generator(
         brain_id, chat_question.question, current_user, chat_id, history, vector_store
     )
 
-    send_telemetry("question_asked", {"model_name": brain.model})
+    maybe_send_telemetry("question_asked", {"model_name": brain.model})
 
     gpt_answer_generator = chat_instance.get_answer_generator(
         brain=brain,
@@ -162,7 +162,7 @@ async def update_chat_message(
     chat_id: UUID,
     message_id: UUID,
     current_user: UserIdentity = Depends(get_current_user),
-) :
+):
     chat = chat_service.get_chat_by_id(
         chat_id  # pyright: ignore reportPrivateUsage=none
diff --git a/backend/modules/upload/controller/upload_routes.py b/backend/modules/upload/controller/upload_routes.py
index 95bbcef44..0345daa0c 100644
--- a/backend/modules/upload/controller/upload_routes.py
+++ b/backend/modules/upload/controller/upload_routes.py
@@ -21,7 +21,7 @@ from modules.notification.entity.notification import NotificationsStatusEnum
 from modules.notification.service.notification_service import NotificationService
 from modules.user.entity.user_identity import UserIdentity
 from packages.files.file import convert_bytes, get_file_size
-from packages.utils.telemetry import send_telemetry
+from packages.utils.telemetry import maybe_send_telemetry
 from repository.files.upload_file import upload_file_storage
 
 logger = get_logger(__name__)
@@ -47,7 +47,6 @@ async def upload_file(
         brain_id, current_user.id, [RoleEnum.Editor, RoleEnum.Owner]
     )
     uploadFile.file.seek(0)
-    logger.info(f"Uploading file {uploadFile.filename} to brain {brain_id}")
     user_daily_usage = UserUsage(
         id=current_user.id,
         email=current_user.email,
@@ -56,7 +55,7 @@ async def upload_file(
     user_settings = user_daily_usage.get_user_settings()
     remaining_free_space = user_settings.get("max_brain_size", 1000000000)
 
-    send_telemetry("upload_file", {"file_name": uploadFile.filename})
+    maybe_send_telemetry("upload_file", {"file_name": uploadFile.filename})
     file_size = get_file_size(uploadFile)
     if remaining_free_space - file_size < 0:
         message = f"Brain will exceed maximum capacity. Maximum file allowed is : {convert_bytes(remaining_free_space)}"
@@ -72,13 +71,11 @@ async def upload_file(
     )
 
     file_content = await uploadFile.read()
-    logger.info(f"File {uploadFile.filename} read successfully")
-    logger.info(f"Content length: {len(file_content)}")
+
     filename_with_brain_id = str(brain_id) + "/" + str(uploadFile.filename)
 
     try:
         file_in_storage = upload_file_storage(file_content, filename_with_brain_id)
-        logger.info(f"File {file_in_storage} uploaded successfully")
     except Exception as e:
         print(e)
@@ -113,7 +110,6 @@ async def upload_file(
     )
 
     added_knowledge = knowledge_service.add_knowledge(knowledge_to_add)
-    logger.info(f"Knowledge {added_knowledge} added successfully")
 
     process_file_and_notify.delay(
         file_name=filename_with_brain_id,
diff --git a/backend/packages/utils/telemetry.py b/backend/packages/utils/telemetry.py
index b256ff76c..afb4954c0 100644
--- a/backend/packages/utils/telemetry.py
+++ b/backend/packages/utils/telemetry.py
@@ -22,7 +22,7 @@ def generate_machine_key():
     return unique_key
 
 
-def send_telemetry(event_name: str, event_data: dict):
+def send_telemetry(event_name: str, event_data: dict): 
     # Generate a unique machine key
     machine_key = generate_machine_key()
diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml
index a4cf43b16..a908b1238 100644
--- a/docker-compose.dev.yml
+++ b/docker-compose.dev.yml
@@ -24,6 +24,8 @@ services:
       - "5050"
       - "--workers"
       - "6"
+      - "--log-level"
+      - "info"
 
     restart: always
     ports:
diff --git a/frontend/app/App.tsx b/frontend/app/App.tsx
index 5c409a879..71ab3f0cd 100644
--- a/frontend/app/App.tsx
+++ b/frontend/app/App.tsx
@@ -1,7 +1,6 @@
 "use client";
 
 import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
-import Script from "next/script";
 import { posthog } from "posthog-js";
 import { PostHogProvider } from "posthog-js/react";
 import { PropsWithChildren, useEffect } from "react";
@@ -59,11 +58,6 @@ const App = ({ children }: PropsWithChildren): JSX.Element => {
 
   return (
     <>
-