fix: logger level and telemetry function calls (#2409)

# Description

This PR lowers the default logger level to `WARNING`, silences noisy third-party loggers (`httpx`, `LiteLLM`, `litellm`), and switches telemetry call sites from `send_telemetry` to `maybe_send_telemetry`. It also removes leftover debug logging and the dev-mode `debugpy` setup, migrates `OpenAIEmbeddings` imports to the `langchain_openai` package, temporarily disables the integration brain sync loop, and drops the Octolane script along with its CSP entries.

## Checklist before requesting a review

Please delete options that are not relevant.

- [ ] My code follows the style guidelines of this project
- [ ] I have performed a self-review of my code
- [ ] I have commented hard-to-understand areas
- [ ] I have ideally added tests that prove my fix is effective or that
my feature works
- [ ] New and existing unit tests pass locally with my changes
- [ ] Any dependent changes have been merged

## Screenshots (if appropriate):
Authored by Stan Girard on 2024-04-06 18:35:57 -07:00, committed via GitHub (commit 68db4e0361, parent d9d70a7ade).
15 changed files with 41 additions and 58 deletions.

```diff
@@ -5,8 +5,6 @@
     "source.fixAll": "explicit",
     "source.unusedImports": "explicit"
   },
-  "python.linting.enabled": true,
-  "python.linting.flake8Enabled": true,
   "python.analysis.extraPaths": [
     "./backend"
   ],
@@ -54,6 +52,5 @@
   ],
   "python.testing.unittestEnabled": false,
   "python.testing.pytestEnabled": true,
   "python.envFile": "${workspaceFolder}/.env_test",
 }
```


```diff
@@ -43,8 +43,6 @@ RUN pip install --upgrade pip
 # Increase timeout to wait for the new installation
 RUN pip install --no-cache-dir -r requirements.txt --timeout 200
-RUN if [ "$DEV_MODE" = "true" ]; then pip install --no-cache debugpy --timeout 200; fi
 WORKDIR /code
 # Copy the rest of the application
 COPY . .
```


```diff
@@ -1,7 +1,7 @@
 import asyncio
 import io
 import os
-from datetime import datetime, timedelta, timezone
+from datetime import datetime, timezone
 from celery.schedules import crontab
 from celery_config import celery
@@ -191,15 +191,17 @@ def process_integration_brain_sync():
     # only call process_integration_brain_sync_user_brain if more than 1 day has passed since the last sync
     if not integrations:
         return
-    for integration in integrations:
-        print(f"last_synced: {integration.last_synced}")  # Add this line
-        last_synced = datetime.strptime(
-            integration.last_synced, "%Y-%m-%dT%H:%M:%S.%f%z"
-        )
-        if last_synced < time - timedelta(hours=12):
-            process_integration_brain_sync_user_brain.delay(
-                brain_id=integration.brain_id, user_id=integration.user_id
-            )
+    # TODO fix this
+    # for integration in integrations:
+    #     print(f"last_synced: {integration.last_synced}")
+    #     print(f"Integration Name: {integration.name}")
+    #     last_synced = datetime.strptime(
+    #         integration.last_synced, "%Y-%m-%dT%H:%M:%S.%f%z"
+    #     )
+    #     if last_synced < time - timedelta(hours=12) and integration.name == "notion":
+    #         process_integration_brain_sync_user_brain.delay(
+    #             brain_id=integration.brain_id, user_id=integration.user_id
+    #         )
 celery.conf.beat_schedule = {
```
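
The disabled loop hinges on parsing `last_synced` with `datetime.strptime` and comparing it to a 12-hour cutoff. A minimal standalone sketch of that check, using a made-up timestamp value; note the diff also drops `timedelta` from the imports, so re-enabling the loop would require restoring it:

```python
from datetime import datetime, timedelta, timezone

# Example timestamp in the "%Y-%m-%dT%H:%M:%S.%f%z" shape the loop expects
# (illustrative value, not taken from the diff).
last_synced_raw = "2024-04-06T18:35:57.123456+00:00"

last_synced = datetime.strptime(last_synced_raw, "%Y-%m-%dT%H:%M:%S.%f%z")
cutoff = datetime.now(timezone.utc) - timedelta(hours=12)

if last_synced < cutoff:
    # In the real task this would enqueue the Celery job:
    # process_integration_brain_sync_user_brain.delay(brain_id=..., user_id=...)
    print("brain is stale; a sync would be enqueued")
```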


```diff
@@ -6,7 +6,7 @@ from colorlog import (
 )  # You need to install this package: pip install colorlog
-def get_logger(logger_name, log_level=logging.INFO, log_file="application.log"):
+def get_logger(logger_name, log_level=logging.WARNING, log_file="application.log"):
     logger = logging.getLogger(logger_name)
     logger.setLevel(log_level)
     logger.propagate = False  # Prevent log propagation to avoid double logging
```
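
With the default bumped from `logging.INFO` to `logging.WARNING`, every module that calls `get_logger(__name__)` without an explicit level now suppresses its `info` output. A quick sketch of the effect, assuming only the signature shown above:

```python
import logging

from logger import get_logger

log = get_logger(__name__)        # now defaults to WARNING
log.info("startup details")       # suppressed after this change
log.warning("disk nearly full")   # still emitted

# Call sites can opt back into verbose output per logger:
debug_log = get_logger("my.debug.logger", log_level=logging.INFO)
debug_log.info("emitted, because the level was passed explicitly")
```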


```diff
@@ -7,6 +7,7 @@ if __name__ == "__main__":
     load_dotenv()
 import sentry_sdk
+import litellm
 from fastapi import FastAPI, HTTPException
 from fastapi.responses import JSONResponse
 from logger import get_logger
@@ -24,20 +25,23 @@ from modules.prompt.controller import prompt_router
 from modules.upload.controller import upload_router
 from modules.user.controller import user_router
 from packages.utils import handle_request_validation_error
-from packages.utils.telemetry import send_telemetry
+from packages.utils.telemetry import maybe_send_telemetry
 from routes.crawl_routes import crawl_router
 from routes.subscription_routes import subscription_router
 from sentry_sdk.integrations.fastapi import FastApiIntegration
 from sentry_sdk.integrations.starlette import StarletteIntegration
+import logging
+
+# Set the logging level for all loggers to WARNING
+logging.basicConfig(level=logging.INFO)
+logging.getLogger("httpx").setLevel(logging.WARNING)
+logging.getLogger("LiteLLM").setLevel(logging.WARNING)
+logging.getLogger("litellm").setLevel(logging.WARNING)
+litellm.set_verbose = False

 logger = get_logger(__name__)
-if os.getenv("DEV_MODE") == "true":
-    import debugpy
-
-    logger.debug("👨‍💻 Running in dev mode")
-    debugpy.listen(("0.0.0.0", 5678))

 def before_send(event, hint):
     # If this is a transaction event
@@ -100,11 +104,11 @@ if os.getenv("TELEMETRY_ENABLED") == "true":
     logger.info(
         "To disable telemetry, set the TELEMETRY_ENABLED environment variable to false."
     )
-    send_telemetry("booting", {"status": "ok"})
+    maybe_send_telemetry("booting", {"status": "ok"})

 if __name__ == "__main__":
     # run main.py to debug backend
     import uvicorn

-    uvicorn.run(app, host="0.0.0.0", port=5050)
+    uvicorn.run(app, host="0.0.0.0", port=5050, log_level="warning", access_log=False)
```
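
Note that the added comment says "all loggers", but `logging.basicConfig(level=logging.INFO)` configures the root logger at INFO; only the named `httpx`/`LiteLLM`/`litellm` loggers are forced to WARNING. The per-logger level wins because a logger checks its own effective level before the record ever reaches the root handler, as this small sketch illustrates:

```python
import logging

logging.basicConfig(level=logging.INFO)               # root handler at INFO
logging.getLogger("httpx").setLevel(logging.WARNING)  # quiet one noisy library

logging.getLogger("httpx").info("dropped: the 'httpx' logger filters at WARNING")
logging.getLogger("myapp").info("emitted: 'myapp' inherits INFO from the root")
```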


```diff
@@ -1,8 +1,5 @@
-from logger import get_logger
 from models.databases.supabase import BrainSubscription, File, UserUsage, Vector

-logger = get_logger(__name__)
-
 class SupabaseDB(
     UserUsage,
```


```diff
@@ -158,12 +158,9 @@ class UserUsage(Repository):
             return False, False
         except Exception as e:
-            logger.info(matching_customers)
-            logger.error(e)
-            logger.error(
-                "Error while checking if user is a premium user. Stripe needs to be configured."
-            )
+            logger.info(
+                "Stripe needs to be configured if you want to have the premium features"
+            )
+            logger.error(e)
             return False, True

     def get_user_settings(self, user_id):
```


```diff
@@ -1,7 +1,7 @@
 from uuid import UUID

 from langchain.embeddings.ollama import OllamaEmbeddings
-from langchain.embeddings.openai import OpenAIEmbeddings
+from langchain_openai import OpenAIEmbeddings
 from logger import get_logger
 from models.databases.supabase.supabase import SupabaseDB
 from posthog import Posthog
```
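
`langchain.embeddings.openai.OpenAIEmbeddings` was deprecated in favor of the standalone `langchain-openai` package, and the new import is a drop-in replacement. A minimal usage sketch, assuming `OPENAI_API_KEY` is set in the environment:

```python
from langchain_openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()  # picks up OPENAI_API_KEY from the environment
vector = embeddings.embed_query("What is Quivr?")
print(len(vector))  # dimensionality of the returned embedding
```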


```diff
@@ -20,7 +20,7 @@ from modules.brain.service.integration_brain_service import (
 )
 from modules.prompt.service.prompt_service import PromptService
 from modules.user.entity.user_identity import UserIdentity
-from packages.utils.telemetry import send_telemetry
+from packages.utils.telemetry import maybe_send_telemetry
 from repository.brain import get_question_context_from_brain

 logger = get_logger(__name__)
@@ -109,12 +109,11 @@ async def create_new_brain(
             status_code=429,
             detail=f"Maximum number of brains reached ({user_settings.get('max_brains', 5)}).",
         )
-    send_telemetry("create_brain", {"brain_name": brain.name})
+    maybe_send_telemetry("create_brain", {"brain_name": brain.name})
     new_brain = brain_service.create_brain(
         brain=brain,
         user_id=current_user.id,
     )
-    logger.info(f"Creating default brain for user {current_user.id}.")
     brain_user_service.create_brain_user(
         user_id=current_user.id,
         brain_id=new_brain.brain_id,
```


```diff
@@ -4,7 +4,7 @@ from uuid import UUID
 from fastapi import APIRouter, Depends, HTTPException, Query, Request
 from fastapi.responses import StreamingResponse
 from langchain.embeddings.ollama import OllamaEmbeddings
-from langchain.embeddings.openai import OpenAIEmbeddings
+from langchain_openai import OpenAIEmbeddings
 from logger import get_logger
 from middlewares.auth import AuthBearer, get_current_user
 from models.settings import BrainSettings, get_supabase_client
@@ -22,7 +22,7 @@ from modules.chat.entity.chat import Chat
 from modules.chat.service.chat_service import ChatService
 from modules.notification.service.notification_service import NotificationService
 from modules.user.entity.user_identity import UserIdentity
-from packages.utils.telemetry import send_telemetry
+from packages.utils.telemetry import maybe_send_telemetry
 from vectorstore.supabase import CustomSupabaseVectorStore

 logger = get_logger(__name__)
@@ -78,7 +78,7 @@ def get_answer_generator(
         brain_id, chat_question.question, current_user, chat_id, history, vector_store
     )

-    send_telemetry("question_asked", {"model_name": brain.model})
+    maybe_send_telemetry("question_asked", {"model_name": brain.model})

     gpt_answer_generator = chat_instance.get_answer_generator(
         brain=brain,
@@ -162,7 +162,7 @@ async def update_chat_message(
     chat_id: UUID,
     message_id: UUID,
     current_user: UserIdentity = Depends(get_current_user),
-) :
+):
     chat = chat_service.get_chat_by_id(
         chat_id  # pyright: ignore reportPrivateUsage=none
```


```diff
@@ -21,7 +21,7 @@ from modules.notification.entity.notification import NotificationsStatusEnum
 from modules.notification.service.notification_service import NotificationService
 from modules.user.entity.user_identity import UserIdentity
 from packages.files.file import convert_bytes, get_file_size
-from packages.utils.telemetry import send_telemetry
+from packages.utils.telemetry import maybe_send_telemetry
 from repository.files.upload_file import upload_file_storage

 logger = get_logger(__name__)
@@ -47,7 +47,6 @@ async def upload_file(
         brain_id, current_user.id, [RoleEnum.Editor, RoleEnum.Owner]
     )
     uploadFile.file.seek(0)
-    logger.info(f"Uploading file {uploadFile.filename} to brain {brain_id}")
     user_daily_usage = UserUsage(
         id=current_user.id,
         email=current_user.email,
@@ -56,7 +55,7 @@ async def upload_file(
     user_settings = user_daily_usage.get_user_settings()
     remaining_free_space = user_settings.get("max_brain_size", 1000000000)

-    send_telemetry("upload_file", {"file_name": uploadFile.filename})
+    maybe_send_telemetry("upload_file", {"file_name": uploadFile.filename})
     file_size = get_file_size(uploadFile)
     if remaining_free_space - file_size < 0:
         message = f"Brain will exceed maximum capacity. Maximum file allowed is : {convert_bytes(remaining_free_space)}"
@@ -72,13 +71,11 @@ async def upload_file(
     )

     file_content = await uploadFile.read()
-    logger.info(f"File {uploadFile.filename} read successfully")
-    logger.info(f"Content length: {len(file_content)}")
     filename_with_brain_id = str(brain_id) + "/" + str(uploadFile.filename)

     try:
         file_in_storage = upload_file_storage(file_content, filename_with_brain_id)
-        logger.info(f"File {file_in_storage} uploaded successfully")
     except Exception as e:
         print(e)
@@ -113,7 +110,6 @@ async def upload_file(
     )

     added_knowledge = knowledge_service.add_knowledge(knowledge_to_add)
-    logger.info(f"Knowledge {added_knowledge} added successfully")
     process_file_and_notify.delay(
         file_name=filename_with_brain_id,
```


```diff
@@ -22,7 +22,7 @@ def generate_machine_key():
     return unique_key

 def send_telemetry(event_name: str, event_data: dict):
     # Generate a unique machine key
     machine_key = generate_machine_key()
```
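
The hunk above only captures unchanged context around `send_telemetry`; the body of the new `maybe_send_telemetry` that the call sites now import is not shown in this diff. Given the `TELEMETRY_ENABLED` check in `main.py`, a plausible but hypothetical shape for the wrapper is:

```python
import os

def maybe_send_telemetry(event_name: str, event_data: dict):
    # Hypothetical gate, inferred from the TELEMETRY_ENABLED flag used in
    # main.py; the actual implementation is not shown in this diff.
    if os.getenv("TELEMETRY_ENABLED") != "true":
        return
    send_telemetry(event_name, event_data)
```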


```diff
@@ -24,6 +24,8 @@ services:
       - "5050"
       - "--workers"
       - "6"
+      - "--log-level"
+      - "info"
     restart: always
     ports:
```


```diff
@@ -1,7 +1,6 @@
 "use client";

 import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
-import Script from "next/script";
 import { posthog } from "posthog-js";
 import { PostHogProvider } from "posthog-js/react";
 import { PropsWithChildren, useEffect } from "react";
@@ -59,11 +58,6 @@ const App = ({ children }: PropsWithChildren): JSX.Element => {
   return (
     <>
-      <Script
-        id="octolane-script"
-        src="https://cdn.octolane.com/tag.js?pk=0a213725640302dff773"
-      />
       <PostHogProvider client={posthog}>
         <IntercomProvider>
           <div className="flex flex-1 flex-col overflow-auto">
```


```diff
@@ -54,7 +54,6 @@ const ContentSecurityPolicy = {
     "https://chat.quivr.app",
     "*.intercom.io",
     "*.intercomcdn.com",
-    "https://*.octolane.com",
     "https://*.vercel.app",
     process.env.NEXT_PUBLIC_FRONTEND_URL,
   ],
@@ -65,7 +64,6 @@
     process.env.NEXT_PUBLIC_CMS_URL,
     "*.intercom.io",
     "*.intercomcdn.com",
-    "https://*.octolane.com",
     "https://api.june.so",
     "https://api.openai.com",
     "https://cdn.growthbook.io",
@@ -96,7 +94,6 @@
     "https://va.vercel-scripts.com/",
     "*.intercom.io",
     "*.intercomcdn.com",
-    "https://*.octolane.com",
     process.env.NEXT_PUBLIC_FRONTEND_URL,
     "https://preview.quivr.app",
     "https://*.vercel.app",
```