diff --git a/core/quivr_core/rag/entities/config.py b/core/quivr_core/rag/entities/config.py index cb455ab42..1e848bdcd 100644 --- a/core/quivr_core/rag/entities/config.py +++ b/core/quivr_core/rag/entities/config.py @@ -230,6 +230,7 @@ class LLMEndpointConfig(QuivrBaseConfig): if not self.llm_api_key: logger.warning(f"The API key for supplier '{self.supplier}' is not set. ") + logger.warning(f"Please set the environment variable: '{self.env_variable_name}'. ") def set_llm_model_config(self): # Automatically set context_length and tokenizer_hub based on the supplier and model diff --git a/examples/chatbot/README.md b/examples/chatbot/README.md index 3cfa916ae..2602d54bb 100644 --- a/examples/chatbot/README.md +++ b/examples/chatbot/README.md @@ -8,23 +8,32 @@ This example demonstrates how to create a simple chatbot using Quivr and Chainli ## Installation -1. Clone the repository or navigate to the `core/examples/chatbot` directory. +1. Clone the repository and navigate to the `examples/chatbot` directory. -2. Install the required dependencies: +2. Make sure you have [rye](https://rye.astral.sh/) installed. +3. Install the requirements using `rye`: + + ```sh + rye sync ``` - pip install -r requirements.lock +4. Activate the virtual environment + + ```sh + source .venv/bin/activate ``` ## Running the Chatbot -1. Start the Chainlit server: +1. Define your API key as an environment variable, e.g. `export OPENAI_API_KEY=your-key-here` + +2. Start the Chainlit server: ``` chainlit run main.py ``` -2. Open your web browser and go to the URL displayed in the terminal (usually `http://localhost:8000`). +3. Open your web browser and go to the URL displayed in the terminal (usually `http://localhost:8000`). ## Using the Chatbot