diff --git a/backend/celery_task.py b/backend/celery_task.py
index 939643996..7b0bb0045 100644
--- a/backend/celery_task.py
+++ b/backend/celery_task.py
@@ -6,12 +6,10 @@ from repository.files.upload_file import DocumentSerializable
 
 
 @shared_task
-def create_embedding_for_document(
-    brain_id, doc_with_metadata, user_openai_api_key, file_sha1
-):
+def create_embedding_for_document(brain_id, doc_with_metadata, file_sha1):
     neurons = Neurons()
     doc = DocumentSerializable.from_json(doc_with_metadata)
-    created_vector = neurons.create_vector(doc, user_openai_api_key)
+    created_vector = neurons.create_vector(doc)
 
     database = get_supabase_db()
     database.set_file_sha_from_metadata(file_sha1)
diff --git a/backend/celery_worker.py b/backend/celery_worker.py
index 663648fb6..2dd74d8b2 100644
--- a/backend/celery_worker.py
+++ b/backend/celery_worker.py
@@ -56,9 +56,7 @@ else:
 def process_file_and_notify(
     file_name: str,
     file_original_name: str,
-    enable_summarization,
     brain_id,
-    openai_api_key,
     notification_id=None,
 ):
     supabase_client = get_supabase_client()
@@ -81,9 +79,7 @@ def process_file_and_notify(
         message = loop.run_until_complete(
             filter_file(
                 file=file_instance,
-                enable_summarization=enable_summarization,
                 brain_id=brain_id,
-                openai_api_key=openai_api_key,
                 original_file_name=file_original_name,
             )
         )
@@ -112,9 +108,7 @@
 @celery.task(name="process_crawl_and_notify")
 def process_crawl_and_notify(
     crawl_website_url,
-    enable_summarization,
     brain_id,
-    openai_api_key,
     notification_id=None,
 ):
     crawl_website = CrawlWebsite(url=crawl_website_url)
@@ -136,9 +130,7 @@ def process_crawl_and_notify(
         message = loop.run_until_complete(
             filter_file(
                 file=file_instance,
-                enable_summarization=enable_summarization,
                 brain_id=brain_id,
-                openai_api_key=openai_api_key,
                 original_file_name=crawl_website_url,
             )
         )
@@ -147,9 +139,7 @@
         message = loop.run_until_complete(
             process_github(
                 repo=crawl_website.url,
-                enable_summarization="false",
                 brain_id=brain_id,
-                user_openai_api_key=openai_api_key,
             )
         )
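A minimal sketch of the new task contract, reusing the producer-side names from backend/packages/files/parsers/common.py below: only the brain id, the serialized document, and the file hash cross the broker, so no API key is ever serialized into the queue.

    # Producer side (sketch): the Celery payload no longer carries a user key.
    create_embedding_for_document.delay(
        brain_id,
        doc_with_metadata.to_json(),
        file.file_sha1,
    )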
diff --git a/backend/llm/qa_base.py b/backend/llm/qa_base.py
index ea0dee0bd..d0329e05d 100644
--- a/backend/llm/qa_base.py
+++ b/backend/llm/qa_base.py
@@ -63,7 +63,6 @@ class QABaseBrainPicking(BaseModel):
     chat_id: str = None  # pyright: ignore reportPrivateUsage=none
     brain_id: str = None  # pyright: ignore reportPrivateUsage=none
     max_tokens: int = 256
-    user_openai_api_key: str = None  # pyright: ignore reportPrivateUsage=none
     streaming: bool = False
     openai_api_key: str = None  # pyright: ignore reportPrivateUsage=none
 
@@ -71,13 +70,6 @@ class QABaseBrainPicking(BaseModel):
         AsyncIteratorCallbackHandler
     ] = None  # pyright: ignore reportPrivateUsage=none
 
-    def _determine_api_key(self, openai_api_key, user_openai_api_key):
-        """If user provided an API key, use it."""
-        if user_openai_api_key is not None:
-            return user_openai_api_key
-        else:
-            return openai_api_key
-
     def _determine_streaming(self, model: str, streaming: bool) -> bool:
         """If the model name allows for streaming and streaming is declared, set streaming to True."""
         return streaming
diff --git a/backend/llm/qa_headless.py b/backend/llm/qa_headless.py
index 506653108..a80d8e184 100644
--- a/backend/llm/qa_headless.py
+++ b/backend/llm/qa_headless.py
@@ -32,20 +32,12 @@ class HeadlessQA(BaseModel):
     model: str
     temperature: float = 0.0
     max_tokens: int = 2000
-    user_openai_api_key: Optional[str] = None
     openai_api_key: Optional[str] = None
     streaming: bool = False
     chat_id: str
     callbacks: Optional[List[AsyncIteratorCallbackHandler]] = None
     prompt_id: Optional[UUID] = None
 
-    def _determine_api_key(self, openai_api_key, user_openai_api_key):
-        """If user provided an API key, use it."""
-        if user_openai_api_key is not None:
-            return user_openai_api_key
-        else:
-            return openai_api_key
-
     def _determine_streaming(self, streaming: bool) -> bool:
         """If the model name allows for streaming and streaming is declared, set streaming to True."""
         return streaming
@@ -61,11 +53,6 @@ class HeadlessQA(BaseModel):
     def __init__(self, **data):
         super().__init__(**data)
 
-        print("in HeadlessQA")
-
-        self.openai_api_key = self._determine_api_key(
-            self.openai_api_key, self.user_openai_api_key
-        )
         self.streaming = self._determine_streaming(self.streaming)
         self.callbacks = self._determine_callback_array(self.streaming)
 
diff --git a/backend/models/brain_entity.py b/backend/models/brain_entity.py
index 686b5e44d..dd011458f 100644
--- a/backend/models/brain_entity.py
+++ b/backend/models/brain_entity.py
@@ -20,7 +20,6 @@ class BrainEntity(BaseModel):
     temperature: Optional[float]
     model: Optional[str]
     max_tokens: Optional[int]
-    openai_api_key: Optional[str]
     status: Optional[str]
     prompt_id: Optional[UUID]
     last_update: str
diff --git a/backend/models/brains.py b/backend/models/brains.py
index 705d7afb2..d43e18906 100644
--- a/backend/models/brains.py
+++ b/backend/models/brains.py
@@ -19,7 +19,6 @@ class Brain(BaseModel):
     model: Optional[str] = None
     temperature: Optional[float] = 0.0
     max_tokens: Optional[int] = 256
-    openai_api_key: Optional[str] = None
     files: List[Any] = []
     prompt_id: Optional[UUID] = None
 
diff --git a/backend/models/databases/supabase/brains.py b/backend/models/databases/supabase/brains.py
index 23abaa993..da1b03c0e 100644
--- a/backend/models/databases/supabase/brains.py
+++ b/backend/models/databases/supabase/brains.py
@@ -25,7 +25,6 @@ class CreateBrainProperties(BaseModel, extra=Extra.forbid):
     model: Optional[str]
     temperature: Optional[float] = 0.0
     max_tokens: Optional[int] = 256
-    openai_api_key: Optional[str] = None
     prompt_id: Optional[UUID] = None
     brain_type: Optional[BrainType] = BrainType.DOC
     brain_definition: Optional[CreateApiBrainDefinition]
@@ -44,7 +43,6 @@ class BrainUpdatableProperties(BaseModel):
     temperature: Optional[float]
     model: Optional[str]
     max_tokens: Optional[int]
-    openai_api_key: Optional[str]
     status: Optional[str]
     prompt_id: Optional[UUID]
 
diff --git a/backend/models/settings.py b/backend/models/settings.py
index a71865843..ce1a4662c 100644
--- a/backend/models/settings.py
+++ b/backend/models/settings.py
@@ -11,24 +11,27 @@ class BrainRateLimiting(BaseSettings):
 
 class BrainSettings(BaseSettings):
     openai_api_key: str
-    anthropic_api_key: str
     supabase_url: str
     supabase_service_key: str
     pg_database_url: str = "not implemented"
     resend_api_key: str = "null"
     resend_email_address: str = "brain@mail.quivr.app"
 
+
 class ContactsSettings(BaseSettings):
     resend_contact_sales_from: str = "null"
     resend_contact_sales_to: str = "null"
 
+
 class LLMSettings(BaseSettings):
     private: bool = False
     model_path: str = "./local_models/ggml-gpt4all-j-v1.3-groovy.bin"
 
+
 class ResendSettings(BaseSettings):
     resend_api_key: str = "null"
 
+
 def get_supabase_client() -> Client:
     settings = BrainSettings()  # pyright: ignore reportPrivateUsage=none
     supabase_client: Client = create_client(
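With the per-user and per-brain keys removed, BrainSettings.openai_api_key becomes the single source of truth. Pydantic's BaseSettings fills each field from the matching environment variable (case-insensitive), so a sketch of the assumed deployment configuration looks like this; every value is a placeholder:

    import os

    os.environ["OPENAI_API_KEY"] = "sk-placeholder"          # placeholder value
    os.environ["SUPABASE_URL"] = "https://xyz.supabase.co"   # placeholder value
    os.environ["SUPABASE_SERVICE_KEY"] = "service-role-key"  # placeholder value

    settings = BrainSettings()
    # BaseSettings maps openai_api_key <- OPENAI_API_KEY automatically.
    assert settings.openai_api_key == "sk-placeholder"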
diff --git a/backend/modules/user/controller/user_controller.py b/backend/modules/user/controller/user_controller.py
index d341eacb6..0c10cc02b 100644
--- a/backend/modules/user/controller/user_controller.py
+++ b/backend/modules/user/controller/user_controller.py
@@ -33,7 +33,6 @@ async def get_user_endpoint(
     user_daily_usage = UserUsage(
         id=current_user.id,
         email=current_user.email,
-        openai_api_key=current_user.openai_api_key,
     )
     user_settings = user_daily_usage.get_user_settings()
     max_brain_size = user_settings.get("max_brain_size", 1000000000)
diff --git a/backend/modules/user/entity/user_identity.py b/backend/modules/user/entity/user_identity.py
index 5cde6208b..6e1afebe9 100644
--- a/backend/modules/user/entity/user_identity.py
+++ b/backend/modules/user/entity/user_identity.py
@@ -7,4 +7,3 @@ from pydantic import BaseModel
 class UserIdentity(BaseModel):
     id: UUID
     email: Optional[str] = None
-    openai_api_key: Optional[str] = None
diff --git a/backend/packages/embeddings/vectors.py b/backend/packages/embeddings/vectors.py
index 19f9607a9..032663c26 100644
--- a/backend/packages/embeddings/vectors.py
+++ b/backend/packages/embeddings/vectors.py
@@ -2,7 +2,6 @@ from concurrent.futures import ThreadPoolExecutor
 from typing import List
 from uuid import UUID
 
-from langchain.embeddings.openai import OpenAIEmbeddings
 from logger import get_logger
 from models.settings import get_documents_vector_store, get_embeddings, get_supabase_db
 from pydantic import BaseModel
@@ -12,14 +11,11 @@ logger = get_logger(__name__)
 
 # TODO: Create interface for embeddings and implement it for Supabase and OpenAI (current Quivr)
 class Neurons(BaseModel):
-    def create_vector(self, doc, user_openai_api_key=None):
+    def create_vector(self, doc):
         documents_vector_store = get_documents_vector_store()
         logger.info("Creating vector for document")
         logger.info(f"Document: {doc}")
-        if user_openai_api_key:
-            documents_vector_store._embedding = OpenAIEmbeddings(
-                openai_api_key=user_openai_api_key
-            )  # pyright: ignore reportPrivateUsage=none
+
         try:
             sids = documents_vector_store.add_documents([doc])
             if sids and len(sids) > 0:
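Since create_vector no longer swaps in a per-user OpenAIEmbeddings instance, the vector store embeds every document with the configuration it was constructed with. A sketch of the new call path, with an illustrative document:

    from langchain.docstore.document import Document

    doc = Document(page_content="hello quivr", metadata={"file_sha1": "abc123"})  # illustrative
    sids = Neurons().create_vector(doc)  # embeds with the store's own settings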
diff --git a/backend/packages/files/parsers/audio.py b/backend/packages/files/parsers/audio.py
index f715c55b0..6f210f383 100644
--- a/backend/packages/files/parsers/audio.py
+++ b/backend/packages/files/parsers/audio.py
@@ -11,9 +11,7 @@ from packages.files.file import compute_sha1_from_content
 
 async def process_audio(
     file: File,
-    enable_summarization: bool,
     user,
-    user_openai_api_key,
 ):
     temp_filename = None
     file_sha = ""
@@ -21,11 +19,6 @@
     dateshort = time.strftime("%Y%m%d")
     file_meta_name = f"audiotranscript_{dateshort}.txt"
     documents_vector_store = get_documents_vector_store()
-    # use this for whisper
-    os.environ.get("OPENAI_API_KEY")
-    if user_openai_api_key:
-        pass
-
     try:
         upload_file = file.file
         with tempfile.NamedTemporaryFile(
diff --git a/backend/packages/files/parsers/code_python.py b/backend/packages/files/parsers/code_python.py
index 360601b8b..4806424f3 100644
--- a/backend/packages/files/parsers/code_python.py
+++ b/backend/packages/files/parsers/code_python.py
@@ -4,11 +4,9 @@ from models import File
 from .common import process_file
 
 
-async def process_python(file: File, enable_summarization, brain_id, user_openai_api_key):
+async def process_python(file: File, brain_id):
     return await process_file(
         file=file,
         loader_class=PythonLoader,
-        enable_summarization=enable_summarization,
         brain_id=brain_id,
-        user_openai_api_key=user_openai_api_key,
     )
diff --git a/backend/packages/files/parsers/common.py b/backend/packages/files/parsers/common.py
index 98ba226f3..eacd501ab 100644
--- a/backend/packages/files/parsers/common.py
+++ b/backend/packages/files/parsers/common.py
@@ -8,9 +8,7 @@ from repository.files.upload_file import DocumentSerializable
 
 async def process_file(
     file: File,
     loader_class,
-    enable_summarization,
     brain_id,
-    user_openai_api_key,
 ):
     dateshort = time.strftime("%Y%m%d")
@@ -24,14 +22,13 @@ async def process_file(
             "chunk_size": file.chunk_size,
             "chunk_overlap": file.chunk_overlap,
             "date": dateshort,
-            "summarization": "true" if enable_summarization else "false",
         }
         doc_with_metadata = DocumentSerializable(
             page_content=doc.page_content, metadata=metadata
         )
 
         create_embedding_for_document.delay(
-            brain_id, doc_with_metadata.to_json(), user_openai_api_key, file.file_sha1
+            brain_id, doc_with_metadata.to_json(), file.file_sha1
         )
 
     return "Hello World!"
diff --git a/backend/packages/files/parsers/csv.py b/backend/packages/files/parsers/csv.py
index e8c195f38..470515b99 100644
--- a/backend/packages/files/parsers/csv.py
+++ b/backend/packages/files/parsers/csv.py
@@ -6,14 +6,10 @@ from .common import process_file
 
 def process_csv(
     file: File,
-    enable_summarization,
     brain_id,
-    user_openai_api_key,
 ):
     return process_file(
         file=file,
         loader_class=CSVLoader,
-        enable_summarization=enable_summarization,
         brain_id=brain_id,
-        user_openai_api_key=user_openai_api_key,
     )
diff --git a/backend/packages/files/parsers/docx.py b/backend/packages/files/parsers/docx.py
index 641fcc13f..3b26c1a8f 100644
--- a/backend/packages/files/parsers/docx.py
+++ b/backend/packages/files/parsers/docx.py
@@ -4,11 +4,9 @@ from models import File
 from .common import process_file
 
 
-def process_docx(file: File, enable_summarization, brain_id, user_openai_api_key):
+def process_docx(file: File, brain_id):
     return process_file(
         file=file,
         loader_class=Docx2txtLoader,
-        enable_summarization=enable_summarization,
         brain_id=brain_id,
-        user_openai_api_key=user_openai_api_key,
     )
diff --git a/backend/packages/files/parsers/epub.py b/backend/packages/files/parsers/epub.py
index fc5233f2d..695212aae 100644
--- a/backend/packages/files/parsers/epub.py
+++ b/backend/packages/files/parsers/epub.py
@@ -4,11 +4,9 @@ from models import File
 from .common import process_file
 
 
-def process_epub(file: File, enable_summarization, brain_id, user_openai_api_key):
+def process_epub(file: File, brain_id):
     return process_file(
         file=file,
         loader_class=UnstructuredEPubLoader,
-        enable_summarization=enable_summarization,
         brain_id=brain_id,
-        user_openai_api_key=user_openai_api_key,
     )
diff --git a/backend/packages/files/parsers/github.py b/backend/packages/files/parsers/github.py
index deb963eac..bd15296c2 100644
--- a/backend/packages/files/parsers/github.py
+++ b/backend/packages/files/parsers/github.py
@@ -11,9 +11,7 @@ from packages.files.file import compute_sha1_from_content
 
 async def process_github(
     repo,
-    enable_summarization,
     brain_id,
-    user_openai_api_key,
 ):
     random_dir_name = os.urandom(16).hex()
     dateshort = time.strftime("%Y%m%d")
@@ -54,7 +52,6 @@ async def process_github(
             "chunk_size": chunk_size,
             "chunk_overlap": chunk_overlap,
             "date": dateshort,
-            "summarization": "true" if enable_summarization else "false",
         }
         doc_with_metadata = Document(page_content=doc.page_content, metadata=metadata)
 
@@ -66,9 +63,7 @@
 
         if not file_exists:
             neurons = Neurons()
-            created_vector = neurons.create_vector(
-                doc_with_metadata, user_openai_api_key
-            )
+            created_vector = neurons.create_vector(doc_with_metadata)
 
             file_exists_in_brain = file.file_already_exists_in_brain(brain_id)
diff --git a/backend/packages/files/parsers/html.py b/backend/packages/files/parsers/html.py
index c1908578a..3e247cc9c 100644
--- a/backend/packages/files/parsers/html.py
+++ b/backend/packages/files/parsers/html.py
@@ -4,11 +4,9 @@ from models import File
 from .common import process_file
 
 
-def process_html(file: File, enable_summarization, brain_id, user_openai_api_key):
+def process_html(file: File, brain_id):
     return process_file(
         file=file,
         loader_class=UnstructuredHTMLLoader,
-        enable_summarization=enable_summarization,
         brain_id=brain_id,
-        user_openai_api_key=user_openai_api_key,
     )
diff --git a/backend/packages/files/parsers/markdown.py b/backend/packages/files/parsers/markdown.py
index 1f2e6679b..a10f5edbc 100644
--- a/backend/packages/files/parsers/markdown.py
+++ b/backend/packages/files/parsers/markdown.py
@@ -4,11 +4,9 @@ from models import File
 from .common import process_file
 
 
-def process_markdown(file: File, enable_summarization, brain_id, user_openai_api_key):
+def process_markdown(file: File, brain_id):
     return process_file(
         file=file,
         loader_class=UnstructuredMarkdownLoader,
-        enable_summarization=enable_summarization,
         brain_id=brain_id,
-        user_openai_api_key=user_openai_api_key,
     )
diff --git a/backend/packages/files/parsers/notebook.py b/backend/packages/files/parsers/notebook.py
index d05dd5a3d..a610b8f44 100644
--- a/backend/packages/files/parsers/notebook.py
+++ b/backend/packages/files/parsers/notebook.py
@@ -4,11 +4,9 @@ from models import File
 from .common import process_file
 
 
-def process_ipnyb(file: File, enable_summarization, brain_id, user_openai_api_key):
+def process_ipnyb(file: File, brain_id):
     return process_file(
         file=file,
         loader_class=NotebookLoader,
-        enable_summarization=enable_summarization,
         brain_id=brain_id,
-        user_openai_api_key=user_openai_api_key,
     )
diff --git a/backend/packages/files/parsers/odt.py b/backend/packages/files/parsers/odt.py
index f26eccc24..5c57de39e 100644
--- a/backend/packages/files/parsers/odt.py
+++ b/backend/packages/files/parsers/odt.py
@@ -4,11 +4,9 @@ from models import File
 from .common import process_file
 
 
-def process_odt(file: File, enable_summarization, brain_id, user_openai_api_key):
+def process_odt(file: File, brain_id):
     return process_file(
         file=file,
         loader_class=UnstructuredPDFLoader,
-        enable_summarization=enable_summarization,
         brain_id=brain_id,
-        user_openai_api_key=user_openai_api_key,
     )
diff --git a/backend/packages/files/parsers/pdf.py b/backend/packages/files/parsers/pdf.py
index e44b0a37c..fbc9e771f 100644
--- a/backend/packages/files/parsers/pdf.py
+++ b/backend/packages/files/parsers/pdf.py
@@ -4,11 +4,9 @@ from models import File
 from .common import process_file
 
 
-def process_pdf(file: File, enable_summarization, brain_id, user_openai_api_key):
+def process_pdf(file: File, brain_id):
     return process_file(
         file=file,
         loader_class=UnstructuredPDFLoader,
-        enable_summarization=enable_summarization,
         brain_id=brain_id,
-        user_openai_api_key=user_openai_api_key,
     )
diff --git a/backend/packages/files/parsers/powerpoint.py b/backend/packages/files/parsers/powerpoint.py
index 878aa6a63..2f6c49a26 100644
--- a/backend/packages/files/parsers/powerpoint.py
+++ b/backend/packages/files/parsers/powerpoint.py
@@ -4,11 +4,9 @@ from models import File
 from .common import process_file
 
 
-def process_powerpoint(file: File, enable_summarization, brain_id, user_openai_api_key):
+def process_powerpoint(file: File, brain_id):
     return process_file(
         file=file,
         loader_class=UnstructuredPowerPointLoader,
-        enable_summarization=enable_summarization,
         brain_id=brain_id,
-        user_openai_api_key=user_openai_api_key,
     )
diff --git a/backend/packages/files/parsers/telegram.py b/backend/packages/files/parsers/telegram.py
index 128ad7a85..071cc4c79 100644
--- a/backend/packages/files/parsers/telegram.py
+++ b/backend/packages/files/parsers/telegram.py
@@ -6,14 +6,10 @@ from .common import process_file
 
 def process_telegram(
     file: File,
-    enable_summarization,
     brain_id,
-    user_openai_api_key,
 ):
     return process_file(
         file=file,
         loader_class=TelegramChatFileLoader,
-        enable_summarization=enable_summarization,
         brain_id=brain_id,
-        user_openai_api_key=user_openai_api_key,
     )
diff --git a/backend/packages/files/parsers/txt.py b/backend/packages/files/parsers/txt.py
index 1e6dd399a..b38b9318c 100644
--- a/backend/packages/files/parsers/txt.py
+++ b/backend/packages/files/parsers/txt.py
@@ -4,11 +4,12 @@ from models import File
 from .common import process_file
 
 
-async def process_txt(file: File, enable_summarization, brain_id, user_openai_api_key):
+async def process_txt(
+    file: File,
+    brain_id,
+):
     return await process_file(
         file=file,
         loader_class=TextLoader,
-        enable_summarization=enable_summarization,
         brain_id=brain_id,
-        user_openai_api_key=user_openai_api_key,
     )
diff --git a/backend/packages/files/parsers/xlsx.py b/backend/packages/files/parsers/xlsx.py
index bd4aa57d6..c3c5d8f64 100644
--- a/backend/packages/files/parsers/xlsx.py
+++ b/backend/packages/files/parsers/xlsx.py
@@ -6,14 +6,10 @@ from .common import process_file
 
 def process_xlsx(
     file: File,
-    enable_summarization,
     brain_id,
-    user_openai_api_key,
 ):
     return process_file(
         file=file,
         loader_class=UnstructuredExcelLoader,
-        enable_summarization=enable_summarization,
         brain_id=brain_id,
-        user_openai_api_key=user_openai_api_key,
     )
diff --git a/backend/packages/files/processors.py b/backend/packages/files/processors.py
index 51b03ff26..d7fb0e9ca 100644
--- a/backend/packages/files/processors.py
+++ b/backend/packages/files/processors.py
@@ -49,9 +49,7 @@ def create_response(message, type):
 # TODO: Move filter_file to a file service to avoid circular imports from models/files.py for File class
 async def filter_file(
     file,
-    enable_summarization: bool,
     brain_id,
-    openai_api_key,
     original_file_name=None,
 ):
     await file.compute_file_sha1()
@@ -85,9 +83,7 @@ async def filter_file(
         try:
             await file_processors[file.file_extension](
                 file=file,
-                enable_summarization=enable_summarization,
                 brain_id=brain_id,
-                user_openai_api_key=openai_api_key,
             )
             return create_response(
                 f"✅ {using_file_name} has been uploaded to brain {brain.name}.",  # pyright: ignore reportPrivateUsage=none
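Every parser now shares the (file, brain_id) shape, which is what lets filter_file dispatch through a plain extension map. A sketch under that assumption, with an abbreviated table (the real map in processors.py covers all the parsers above):

    file_processors = {
        ".txt": process_txt,    # async, as in the diff
        ".py": process_python,  # async, as in the diff
        # ... remaining extensions elided
    }

    async def dispatch(file, brain_id):
        # Uniform signature: no summarization flag or per-user key to thread through.
        return await file_processors[file.file_extension](file=file, brain_id=brain_id)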
diff --git a/backend/routes/brain_routes.py b/backend/routes/brain_routes.py
index 734040241..9744f9e13 100644
--- a/backend/routes/brain_routes.py
+++ b/backend/routes/brain_routes.py
@@ -89,7 +89,6 @@ async def create_new_brain(
     user_usage = UserUsage(
         id=current_user.id,
         email=current_user.email,
-        openai_api_key=current_user.openai_api_key,
     )
 
     user_settings = user_usage.get_user_settings()
diff --git a/backend/routes/chat/brainful_chat.py b/backend/routes/chat/brainful_chat.py
index 555631163..7f6387baa 100644
--- a/backend/routes/chat/brainful_chat.py
+++ b/backend/routes/chat/brainful_chat.py
@@ -39,7 +39,6 @@ class BrainfulChat(ChatInterface):
         model,
         max_tokens,
         temperature,
-        user_openai_api_key,
         streaming,
         prompt_id,
         user_id,
@@ -59,7 +58,6 @@ class BrainfulChat(ChatInterface):
                 max_tokens=max_tokens,
                 temperature=temperature,
                 brain_id=brain_id,
-                user_openai_api_key=user_openai_api_key,
                 streaming=streaming,
                 prompt_id=prompt_id,
             )
@@ -70,7 +68,6 @@ class BrainfulChat(ChatInterface):
             max_tokens=max_tokens,
             temperature=temperature,
             brain_id=brain_id,
-            user_openai_api_key=user_openai_api_key,
             streaming=streaming,
             prompt_id=prompt_id,
             user_id=user_id,
diff --git a/backend/routes/chat/brainless_chat.py b/backend/routes/chat/brainless_chat.py
index 5a5a150ef..ce1d7c46f 100644
--- a/backend/routes/chat/brainless_chat.py
+++ b/backend/routes/chat/brainless_chat.py
@@ -20,7 +20,6 @@ class BrainlessChat(ChatInterface):
         model,
         max_tokens,
         temperature,
-        user_openai_api_key,
         streaming,
         prompt_id,
         user_id,
@@ -30,7 +29,6 @@ class BrainlessChat(ChatInterface):
             model=model,
             max_tokens=max_tokens,
             temperature=temperature,
-            user_openai_api_key=user_openai_api_key,
             streaming=streaming,
             prompt_id=prompt_id,
         )
diff --git a/backend/routes/chat/interface.py b/backend/routes/chat/interface.py
index ef4329aab..6bd2450a5 100644
--- a/backend/routes/chat/interface.py
+++ b/backend/routes/chat/interface.py
@@ -18,7 +18,6 @@ class ChatInterface(ABC):
         model,
         max_tokens,
         temperature,
-        user_openai_api_key,
         streaming,
         prompt_id,
         user_id,
diff --git a/backend/routes/chat/utils.py b/backend/routes/chat/utils.py
index a90bfd7c5..c341c7cad 100644
--- a/backend/routes/chat/utils.py
+++ b/backend/routes/chat/utils.py
@@ -38,21 +38,18 @@ def delete_chat_from_db(supabase_db: SupabaseDB, chat_id):
 def check_user_requests_limit(
     user: UserIdentity,
 ):
-    userDailyUsage = UserUsage(
-        id=user.id, email=user.email, openai_api_key=user.openai_api_key
-    )
+    userDailyUsage = UserUsage(id=user.id, email=user.email)
 
     userSettings = userDailyUsage.get_user_settings()
 
     date = time.strftime("%Y%m%d")
     userDailyUsage.handle_increment_user_request_count(date)
 
-    if user.openai_api_key is None:
-        daily_chat_credit = userSettings.get("daily_chat_credit", 0)
-        if int(userDailyUsage.daily_requests_count) >= int(daily_chat_credit):
-            raise HTTPException(
-                status_code=429,  # pyright: ignore reportPrivateUsage=none
-                detail="You have reached the maximum number of requests for today.",  # pyright: ignore reportPrivateUsage=none
-            )
+    daily_chat_credit = userSettings.get("daily_chat_credit", 0)
+    if int(userDailyUsage.daily_requests_count) >= int(daily_chat_credit):
+        raise HTTPException(
+            status_code=429,  # pyright: ignore reportPrivateUsage=none
+            detail="You have reached the maximum number of requests for today.",  # pyright: ignore reportPrivateUsage=none
+        )
     else:
         pass
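Because the bring-your-own-key bypass is gone, the daily credit check now applies to every request. A compact restatement of the new logic (hypothetical helper name; field names as in the diff):

    def is_over_limit(daily_requests_count: int, user_settings: dict) -> bool:
        # Previously skipped when the user supplied an OpenAI key; now always enforced.
        daily_chat_credit = user_settings.get("daily_chat_credit", 0)
        return int(daily_requests_count) >= int(daily_chat_credit)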
diff --git a/backend/routes/chat_routes.py b/backend/routes/chat_routes.py
index 7ff4803c7..e5a2c13df 100644
--- a/backend/routes/chat_routes.py
+++ b/backend/routes/chat_routes.py
@@ -133,22 +133,16 @@ async def create_question_handler(
 
     chat_instance.validate_authorization(user_id=current_user.id, brain_id=brain_id)
 
-    current_user.openai_api_key = request.headers.get("Openai-Api-Key")
     brain = Brain(id=brain_id)
     brain_details: BrainEntity | None = None
 
     userDailyUsage = UserUsage(
         id=current_user.id,
         email=current_user.email,
-        openai_api_key=current_user.openai_api_key,
     )
 
     userSettings = userDailyUsage.get_user_settings()
     is_model_ok = (brain_details or chat_question).model in userSettings.get("models", ["gpt-3.5-turbo"])  # type: ignore
-    if not current_user.openai_api_key:
-        current_user.openai_api_key = chat_instance.get_openai_api_key(
-            brain_id=brain_id, user_id=current_user.id
-        )
     # Retrieve chat model (temperature, max_tokens, model)
     if (
         not chat_question.model
@@ -171,7 +165,6 @@ async def create_question_handler(
         max_tokens=chat_question.max_tokens,
         temperature=chat_question.temperature,
         brain_id=str(brain_id),
-        user_openai_api_key=current_user.openai_api_key,  # pyright: ignore reportPrivateUsage=none
         streaming=False,
         prompt_id=chat_question.prompt_id,
         user_id=current_user.id,
@@ -207,22 +200,15 @@
     chat_instance.validate_authorization(user_id=current_user.id, brain_id=brain_id)
 
     # Retrieve user's OpenAI API key
-    current_user.openai_api_key = request.headers.get("Openai-Api-Key")
     brain = Brain(id=brain_id)
     brain_details: BrainEntity | None = None
 
     userDailyUsage = UserUsage(
         id=current_user.id,
         email=current_user.email,
-        openai_api_key=current_user.openai_api_key,
     )
 
     userSettings = userDailyUsage.get_user_settings()
-    if not current_user.openai_api_key:
-        current_user.openai_api_key = chat_instance.get_openai_api_key(
-            brain_id=brain_id, user_id=current_user.id
-        )
-
     # Retrieve chat model (temperature, max_tokens, model)
     if (
         not chat_question.model
@@ -247,7 +233,6 @@ async def create_stream_question_handler(
             model=(brain_details or chat_question).model if is_model_ok else "gpt-3.5-turbo",  # type: ignore
             max_tokens=(brain_details or chat_question).max_tokens,  # type: ignore
             temperature=(brain_details or chat_question).temperature,  # type: ignore
-            user_openai_api_key=current_user.openai_api_key,  # pyright: ignore reportPrivateUsage=none
             streaming=True,
             prompt_id=chat_question.prompt_id,
             brain_id=str(brain_id),
diff --git a/backend/routes/crawl_routes.py b/backend/routes/crawl_routes.py
index 951906e43..d6c4ab05d 100644
--- a/backend/routes/crawl_routes.py
+++ b/backend/routes/crawl_routes.py
@@ -30,7 +30,6 @@ async def crawl_endpoint(
     crawl_website: CrawlWebsite,
     brain_id: UUID = Query(..., description="The ID of the brain"),
     chat_id: Optional[UUID] = Query(None, description="The ID of the chat"),
-    enable_summarization: bool = False,
     current_user: UserIdentity = Depends(get_current_user),
 ):
     """
@@ -43,7 +42,6 @@ async def crawl_endpoint(
     userDailyUsage = UserUsage(
         id=current_user.id,
         email=current_user.email,
-        openai_api_key=current_user.openai_api_key,
     )
 
     userSettings = userDailyUsage.get_user_settings()
@@ -81,9 +79,7 @@ async def crawl_endpoint(
 
         process_crawl_and_notify.delay(
             crawl_website_url=crawl_website.url,
-            enable_summarization=enable_summarization,
             brain_id=brain_id,
-            openai_api_key=request.headers.get("Openai-Api-Key", None),
             notification_id=crawl_notification.id,
         )
 
diff --git a/backend/routes/upload_routes.py b/backend/routes/upload_routes.py
index f1228e661..78efd498d 100644
--- a/backend/routes/upload_routes.py
+++ b/backend/routes/upload_routes.py
@@ -11,9 +11,7 @@ from models.databases.supabase.knowledge import CreateKnowledgeProperties
 from models.databases.supabase.notifications import CreateNotificationProperties
 from models.notifications import NotificationsStatusEnum
 from modules.user.entity.user_identity import UserIdentity
-from modules.user.repository import get_user_identity
 from packages.files.file import convert_bytes, get_file_size
-from repository.brain import get_brain_details
 from repository.files.upload_file import upload_file_storage
 from repository.knowledge.add_knowledge import add_knowledge
 from repository.notification.add_notification import add_notification
@@ -37,7 +35,6 @@ async def upload_file(
     uploadFile: UploadFile,
     brain_id: UUID = Query(..., description="The ID of the brain"),
     chat_id: Optional[UUID] = Query(None, description="The ID of the chat"),
-    enable_summarization: bool = False,
     current_user: UserIdentity = Depends(get_current_user),
 ):
     validate_brain_authorization(
@@ -47,7 +44,6 @@ async def upload_file(
     userDailyUsage = UserUsage(
         id=current_user.id,
         email=current_user.email,
-        openai_api_key=current_user.openai_api_key,
     )
 
     userSettings = userDailyUsage.get_user_settings()
@@ -72,13 +68,6 @@ async def upload_file(
             status=NotificationsStatusEnum.Pending,
         )
     )
-    openai_api_key = request.headers.get("Openai-Api-Key", None)
-    if openai_api_key is None:
-        brain_details = get_brain_details(brain_id)
-        if brain_details:
-            openai_api_key = brain_details.openai_api_key
-    if openai_api_key is None:
-        openai_api_key = get_user_identity(current_user.id).openai_api_key
 
     file_content = await uploadFile.read()
     filename_with_brain_id = str(brain_id) + "/" + str(uploadFile.filename)
@@ -112,9 +101,7 @@ async def upload_file(
     process_file_and_notify.delay(
         file_name=filename_with_brain_id,
         file_original_name=uploadFile.filename,
-        enable_summarization=enable_summarization,
         brain_id=brain_id,
-        openai_api_key=openai_api_key,
         notification_id=upload_notification.id if upload_notification else None,
     )
     return {"message": "File processing has started."}
diff --git a/backend/tests/test_user_controller.py b/backend/tests/test_user_controller.py
index 51cf7f02e..68ae13484 100644
--- a/backend/tests/test_user_controller.py
+++ b/backend/tests/test_user_controller.py
@@ -32,4 +32,3 @@ def test_get_user_identity(client, api_key):
     print(user_identity)
     assert "id" in user_identity
     assert "email" in user_identity
-    assert "openai_api_key" in user_identity
diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml
index 30bad7980..51c4abf79 100644
--- a/docker-compose.dev.yml
+++ b/docker-compose.dev.yml
@@ -2,6 +2,7 @@ version: "3"
 
 services:
   frontend:
+    image: frontend-dev
     env_file:
       - ./frontend/.env
     build:
diff --git a/frontend/app/brains-management/[brainId]/components/BrainManagementTabs/components/SettingsTab/components/ModelSelection/ModelSelection.tsx b/frontend/app/brains-management/[brainId]/components/BrainManagementTabs/components/SettingsTab/components/ModelSelection/ModelSelection.tsx
index 25bf5e60e..adbde28de 100644
--- a/frontend/app/brains-management/[brainId]/components/BrainManagementTabs/components/SettingsTab/components/ModelSelection/ModelSelection.tsx
+++ b/frontend/app/brains-management/[brainId]/components/BrainManagementTabs/components/SettingsTab/components/ModelSelection/ModelSelection.tsx
@@ -1,7 +1,6 @@
 import { UUID } from "crypto";
 
 import { useTranslation } from "react-i18next";
 
-import Field from "@/lib/components/ui/Field";
 import { defineMaxTokens } from "@/lib/helpers/defineMaxTokens";
 import { SaveButton } from "@/shared/SaveButton";
 
@@ -15,20 +14,13 @@ type ModelSelectionProps = {
 };
 
 export const ModelSelection = (props: ModelSelectionProps): JSX.Element => {
-  const { model, maxTokens, temperature, register } = useBrainFormState();
+  const { model, maxTokens, register } = useBrainFormState();
   const { t } = useTranslation(["translation", "brain", "config"]);
   const { handleSubmit, hasEditRights, accessibleModels } = props;
 
   return (
     <>
[diff truncated: the rest of this hunk removed JSX markup that was stripped during extraction and is not recoverable]