Mirror of https://github.com/QuivrHQ/quivr.git (synced 2024-08-16 09:50:22 +03:00)
fix: logger level and telemetry function calls (#2409)
# Description

Please include a summary of the changes and the related issue. Please also include relevant motivation and context.

## Checklist before requesting a review

Please delete options that are not relevant.

- [ ] My code follows the style guidelines of this project
- [ ] I have performed a self-review of my code
- [ ] I have commented hard-to-understand areas
- [ ] I have ideally added tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes
- [ ] Any dependent changes have been merged

## Screenshots (if appropriate):
This commit is contained in:
parent d9d70a7ade
commit 68db4e0361
.vscode/settings.json (vendored, 3 lines changed)

@@ -5,8 +5,6 @@
         "source.fixAll": "explicit",
         "source.unusedImports": "explicit"
     },
-    "python.linting.enabled": true,
-    "python.linting.flake8Enabled": true,
     "python.analysis.extraPaths": [
         "./backend"
     ],
@@ -54,6 +52,5 @@
     ],
     "python.testing.unittestEnabled": false,
     "python.testing.pytestEnabled": true,

     "python.envFile": "${workspaceFolder}/.env_test",
 }
@@ -43,8 +43,6 @@ RUN pip install --upgrade pip
 # Increase timeout to wait for the new installation
 RUN pip install --no-cache-dir -r requirements.txt --timeout 200

 RUN if [ "$DEV_MODE" = "true" ]; then pip install --no-cache debugpy --timeout 200; fi

 WORKDIR /code
 # Copy the rest of the application
 COPY . .
@@ -1,7 +1,7 @@
 import asyncio
 import io
 import os
-from datetime import datetime, timedelta, timezone
+from datetime import datetime, timezone

 from celery.schedules import crontab
 from celery_config import celery
@@ -191,15 +191,17 @@ def process_integration_brain_sync():
     # only call process_integration_brain_sync_user_brain if more than 1 day has passed since the last sync
     if not integrations:
         return
-    for integration in integrations:
-        print(f"last_synced: {integration.last_synced}")  # Add this line
-        last_synced = datetime.strptime(
-            integration.last_synced, "%Y-%m-%dT%H:%M:%S.%f%z"
-        )
-        if last_synced < time - timedelta(hours=12):
-            process_integration_brain_sync_user_brain.delay(
-                brain_id=integration.brain_id, user_id=integration.user_id
-            )
+    # TODO fix this
+    # for integration in integrations:
+    #     print(f"last_synced: {integration.last_synced}")
+    #     print(f"Integration Name: {integration.name}")
+    #     last_synced = datetime.strptime(
+    #         integration.last_synced, "%Y-%m-%dT%H:%M:%S.%f%z"
+    #     )
+    #     if last_synced < time - timedelta(hours=12) and integration.name == "notion":
+    #         process_integration_brain_sync_user_brain.delay(
+    #             brain_id=integration.brain_id, user_id=integration.user_id
+    #         )


 celery.conf.beat_schedule = {
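Note on the sync check that was commented out above: parsing `last_synced` with `%z` produces a timezone-aware datetime, so the `time` value it is compared against must be aware as well. A small illustration of that constraint (the timestamp string and variable names are only examples mirroring the hunk, not code from this commit):

```python
from datetime import datetime, timedelta, timezone

# %z makes the parsed value timezone-aware, matching strings like "...+0000".
last_synced = datetime.strptime(
    "2024-03-28T10:15:30.123456+0000", "%Y-%m-%dT%H:%M:%S.%f%z"
)

# The reference time must also be aware; comparing aware and naive datetimes raises TypeError.
time = datetime.now(timezone.utc)

needs_sync = last_synced < time - timedelta(hours=12)
print(needs_sync)
```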
@@ -6,7 +6,7 @@ from colorlog import (
 )  # You need to install this package: pip install colorlog


-def get_logger(logger_name, log_level=logging.INFO, log_file="application.log"):
+def get_logger(logger_name, log_level=logging.WARNING, log_file="application.log"):
     logger = logging.getLogger(logger_name)
     logger.setLevel(log_level)
     logger.propagate = False  # Prevent log propagation to avoid double logging
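Note: with the default lowered from INFO to WARNING, existing `logger.info(...)` calls are silenced unless a caller passes an explicit level. A minimal usage sketch based on the signature above (module names are illustrative):

```python
import logging

from logger import get_logger

# With the new default, INFO records from this logger are filtered out.
logger = get_logger(__name__)
logger.info("hidden at the new default level")
logger.warning("still emitted")

# A module that needs verbose output can opt back in explicitly.
verbose = get_logger("upload_debug", log_level=logging.INFO)
verbose.info("emitted, because INFO was requested explicitly")
```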
@@ -7,6 +7,7 @@ if __name__ == "__main__":

 load_dotenv()
 import sentry_sdk
+import litellm
 from fastapi import FastAPI, HTTPException
 from fastapi.responses import JSONResponse
 from logger import get_logger
@@ -24,20 +25,23 @@ from modules.prompt.controller import prompt_router
 from modules.upload.controller import upload_router
 from modules.user.controller import user_router
 from packages.utils import handle_request_validation_error
-from packages.utils.telemetry import send_telemetry
+from packages.utils.telemetry import maybe_send_telemetry
 from routes.crawl_routes import crawl_router
 from routes.subscription_routes import subscription_router
 from sentry_sdk.integrations.fastapi import FastApiIntegration
 from sentry_sdk.integrations.starlette import StarletteIntegration
+import logging

+# Set the logging level for all loggers to WARNING
+logging.basicConfig(level=logging.INFO)
+logging.getLogger("httpx").setLevel(logging.WARNING)
+logging.getLogger("LiteLLM").setLevel(logging.WARNING)
+logging.getLogger("litellm").setLevel(logging.WARNING)
+litellm.set_verbose = False


 logger = get_logger(__name__)

 if os.getenv("DEV_MODE") == "true":
     import debugpy

     logger.debug("👨💻 Running in dev mode")
     debugpy.listen(("0.0.0.0", 5678))


 def before_send(event, hint):
     # If this is a transaction event
@@ -100,11 +104,11 @@ if os.getenv("TELEMETRY_ENABLED") == "true":
     logger.info(
         "To disable telemetry, set the TELEMETRY_ENABLED environment variable to false."
     )
-    send_telemetry("booting", {"status": "ok"})
+    maybe_send_telemetry("booting", {"status": "ok"})


 if __name__ == "__main__":
     # run main.py to debug backend
     import uvicorn

-    uvicorn.run(app, host="0.0.0.0", port=5050)
+    uvicorn.run(app, host="0.0.0.0", port=5050, log_level="warning", access_log=False)
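Note: the logging setup added in the entrypoint above keeps the root logger at INFO while raising only the noisy third-party loggers to WARNING. The same pattern, condensed (logger names are taken from the diff, nothing else is assumed):

```python
import logging

# Root logger stays at INFO so the application's own records are still emitted.
logging.basicConfig(level=logging.INFO)

# Named third-party loggers are raised to WARNING so their per-request chatter is dropped.
for noisy in ("httpx", "LiteLLM", "litellm"):
    logging.getLogger(noisy).setLevel(logging.WARNING)
```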
@@ -1,8 +1,5 @@
-from logger import get_logger
 from models.databases.supabase import BrainSubscription, File, UserUsage, Vector

-logger = get_logger(__name__)
-

 class SupabaseDB(
     UserUsage,
@@ -158,12 +158,9 @@ class UserUsage(Repository):
                 return False, False

         except Exception as e:
-            logger.info(matching_customers)
-            logger.error(e)
-            logger.error(
-                "Error while checking if user is a premium user. Stripe needs to be configured."
+            logger.info(
+                "Stripe needs to be configured if you want to have the premium features"
             )
+            logger.error(e)
             return False, True

     def get_user_settings(self, user_id):
@@ -1,7 +1,7 @@
 from uuid import UUID

 from langchain.embeddings.ollama import OllamaEmbeddings
-from langchain.embeddings.openai import OpenAIEmbeddings
+from langchain_openai import OpenAIEmbeddings
 from logger import get_logger
 from models.databases.supabase.supabase import SupabaseDB
 from posthog import Posthog
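Note on the import swap above: `OpenAIEmbeddings` now comes from the dedicated `langchain_openai` package instead of the deprecated `langchain.embeddings.openai` path, and the class is used the same way. A minimal sketch (assumes `OPENAI_API_KEY` is set in the environment; the query string is just an example):

```python
from langchain_openai import OpenAIEmbeddings

# Same interface as before, only the import path changed.
embeddings = OpenAIEmbeddings()
vector = embeddings.embed_query("What is Quivr?")
print(len(vector))
```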
@@ -20,7 +20,7 @@ from modules.brain.service.integration_brain_service import (
 )
 from modules.prompt.service.prompt_service import PromptService
 from modules.user.entity.user_identity import UserIdentity
-from packages.utils.telemetry import send_telemetry
+from packages.utils.telemetry import maybe_send_telemetry
 from repository.brain import get_question_context_from_brain

 logger = get_logger(__name__)
@@ -109,12 +109,11 @@ async def create_new_brain(
             status_code=429,
             detail=f"Maximum number of brains reached ({user_settings.get('max_brains', 5)}).",
         )
-    send_telemetry("create_brain", {"brain_name": brain.name})
+    maybe_send_telemetry("create_brain", {"brain_name": brain.name})
     new_brain = brain_service.create_brain(
         brain=brain,
         user_id=current_user.id,
     )
     logger.info(f"Creating default brain for user {current_user.id}.")
     brain_user_service.create_brain_user(
         user_id=current_user.id,
         brain_id=new_brain.brain_id,
@@ -4,7 +4,7 @@ from uuid import UUID
 from fastapi import APIRouter, Depends, HTTPException, Query, Request
 from fastapi.responses import StreamingResponse
 from langchain.embeddings.ollama import OllamaEmbeddings
-from langchain.embeddings.openai import OpenAIEmbeddings
+from langchain_openai import OpenAIEmbeddings
 from logger import get_logger
 from middlewares.auth import AuthBearer, get_current_user
 from models.settings import BrainSettings, get_supabase_client
@@ -22,7 +22,7 @@ from modules.chat.entity.chat import Chat
 from modules.chat.service.chat_service import ChatService
 from modules.notification.service.notification_service import NotificationService
 from modules.user.entity.user_identity import UserIdentity
-from packages.utils.telemetry import send_telemetry
+from packages.utils.telemetry import maybe_send_telemetry
 from vectorstore.supabase import CustomSupabaseVectorStore

 logger = get_logger(__name__)
@@ -78,7 +78,7 @@ def get_answer_generator(
         brain_id, chat_question.question, current_user, chat_id, history, vector_store
     )

-    send_telemetry("question_asked", {"model_name": brain.model})
+    maybe_send_telemetry("question_asked", {"model_name": brain.model})

     gpt_answer_generator = chat_instance.get_answer_generator(
         brain=brain,
@@ -162,7 +162,7 @@ async def update_chat_message(
     chat_id: UUID,
     message_id: UUID,
     current_user: UserIdentity = Depends(get_current_user),
-) :
+):

     chat = chat_service.get_chat_by_id(
         chat_id  # pyright: ignore reportPrivateUsage=none
@@ -21,7 +21,7 @@ from modules.notification.entity.notification import NotificationsStatusEnum
 from modules.notification.service.notification_service import NotificationService
 from modules.user.entity.user_identity import UserIdentity
 from packages.files.file import convert_bytes, get_file_size
-from packages.utils.telemetry import send_telemetry
+from packages.utils.telemetry import maybe_send_telemetry
 from repository.files.upload_file import upload_file_storage

 logger = get_logger(__name__)
@@ -47,7 +47,6 @@ async def upload_file(
         brain_id, current_user.id, [RoleEnum.Editor, RoleEnum.Owner]
     )
     uploadFile.file.seek(0)
-    logger.info(f"Uploading file {uploadFile.filename} to brain {brain_id}")
     user_daily_usage = UserUsage(
         id=current_user.id,
         email=current_user.email,
@@ -56,7 +55,7 @@ async def upload_file(
     user_settings = user_daily_usage.get_user_settings()

     remaining_free_space = user_settings.get("max_brain_size", 1000000000)
-    send_telemetry("upload_file", {"file_name": uploadFile.filename})
+    maybe_send_telemetry("upload_file", {"file_name": uploadFile.filename})
     file_size = get_file_size(uploadFile)
     if remaining_free_space - file_size < 0:
         message = f"Brain will exceed maximum capacity. Maximum file allowed is : {convert_bytes(remaining_free_space)}"
@@ -72,13 +71,11 @@ async def upload_file(
     )

     file_content = await uploadFile.read()
-    logger.info(f"File {uploadFile.filename} read successfully")
-    logger.info(f"Content length: {len(file_content)}")

     filename_with_brain_id = str(brain_id) + "/" + str(uploadFile.filename)

     try:
         file_in_storage = upload_file_storage(file_content, filename_with_brain_id)
         logger.info(f"File {file_in_storage} uploaded successfully")

     except Exception as e:
         print(e)
@@ -113,7 +110,6 @@ async def upload_file(
     )

     added_knowledge = knowledge_service.add_knowledge(knowledge_to_add)
-    logger.info(f"Knowledge {added_knowledge} added successfully")

     process_file_and_notify.delay(
         file_name=filename_with_brain_id,
@@ -22,7 +22,7 @@ def generate_machine_key():
     return unique_key


 def send_telemetry(event_name: str, event_data: dict):
     # Generate a unique machine key
     machine_key = generate_machine_key()
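Note: the body of `maybe_send_telemetry` is not part of this diff. The sketch below is only a guess at the gating behaviour implied by its call sites; the `TELEMETRY_ENABLED` check mirrors the one already made in the entrypoint, everything else is assumed:

```python
import os


def maybe_send_telemetry(event_name: str, event_data: dict):
    # Assumed gate: only forward the event when telemetry is explicitly enabled.
    if os.getenv("TELEMETRY_ENABLED") != "true":
        return
    send_telemetry(event_name, event_data)
```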
@@ -24,6 +24,8 @@ services:
       - "5050"
       - "--workers"
       - "6"
+      - "--log-level"
+      - "info"
     restart: always

     ports:
@@ -1,7 +1,6 @@
 "use client";

 import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
-import Script from "next/script";
 import { posthog } from "posthog-js";
 import { PostHogProvider } from "posthog-js/react";
 import { PropsWithChildren, useEffect } from "react";
@@ -59,11 +58,6 @@ const App = ({ children }: PropsWithChildren): JSX.Element => {

   return (
     <>
-      <Script
-        id="octolane-script"
-        src="https://cdn.octolane.com/tag.js?pk=0a213725640302dff773"
-      />
-
       <PostHogProvider client={posthog}>
         <IntercomProvider>
           <div className="flex flex-1 flex-col overflow-auto">
@@ -54,7 +54,6 @@ const ContentSecurityPolicy = {
     "https://chat.quivr.app",
     "*.intercom.io",
     "*.intercomcdn.com",
-    "https://*.octolane.com",
     "https://*.vercel.app",
     process.env.NEXT_PUBLIC_FRONTEND_URL,
   ],
@@ -65,7 +64,6 @@ const ContentSecurityPolicy = {
     process.env.NEXT_PUBLIC_CMS_URL,
     "*.intercom.io",
     "*.intercomcdn.com",
-    "https://*.octolane.com",
    "https://api.june.so",
    "https://api.openai.com",
    "https://cdn.growthbook.io",
@@ -96,7 +94,6 @@ const ContentSecurityPolicy = {
     "https://va.vercel-scripts.com/",
     "*.intercom.io",
     "*.intercomcdn.com",
-    "https://*.octolane.com",
     process.env.NEXT_PUBLIC_FRONTEND_URL,
     "https://preview.quivr.app",
     "https://*.vercel.app",