Mirror of https://github.com/QuivrHQ/quivr.git, commit 285fe5b960.
# Description

This PR includes far too many new features:

- detection of user intent (closes CORE-211)
- treating multiple questions in parallel (closes CORE-212)
- using the chat history when answering a question (closes CORE-213)
- filtering of retrieved chunks by relevance threshold (closes CORE-217)
- dynamic retrieval of chunks (closes CORE-218)
- enabling web search via Tavily (closes CORE-220)
- enabling the agent / assistant to activate tools when relevant to complete the user task (closes CORE-224)

Also closes CORE-205.

## Checklist before requesting a review

Please delete options that are not relevant.

- [ ] My code follows the style guidelines of this project
- [ ] I have performed a self-review of my code
- [ ] I have commented hard-to-understand areas
- [ ] I have ideally added tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes
- [ ] Any dependent changes have been merged

## Screenshots (if appropriate):

---------

Co-authored-by: Stan Girard <stan@quivr.app>
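The relevance-threshold filtering and dynamic chunk retrieval mentioned above (CORE-217 / CORE-218) are only named, not explained, in this description. As a rough, library-agnostic illustration of the idea (not quivr_core's actual implementation), the sketch below keeps only chunks whose similarity score clears a threshold and then caps the selection by a token budget; the `Chunk` class, the default threshold, and the 4-characters-per-token estimate are all hypothetical.

```python
from dataclasses import dataclass


@dataclass
class Chunk:
    """Hypothetical retrieved chunk: text plus a similarity score in [0, 1]."""
    text: str
    score: float


def filter_chunks(
    chunks: list[Chunk],
    relevance_threshold: float = 0.3,  # assumed default, not quivr_core's
    max_context_tokens: int = 2000,    # assumed budget, not quivr_core's
) -> list[Chunk]:
    """Keep chunks above the relevance threshold, best first,
    stopping once a rough token budget is exhausted (dynamic retrieval)."""
    kept: list[Chunk] = []
    used_tokens = 0
    for chunk in sorted(chunks, key=lambda c: c.score, reverse=True):
        if chunk.score < relevance_threshold:
            break  # remaining chunks score even lower
        cost = max(1, len(chunk.text) // 4)  # crude ~4 chars/token estimate
        if used_tokens + cost > max_context_tokens:
            break
        kept.append(chunk)
        used_tokens += cost
    return kept


# Example: only the high-scoring chunk survives the 0.3 threshold.
demo = [Chunk("relevant passage", 0.82), Chunk("weak match", 0.12)]
print([round(c.score, 2) for c in filter_chunks(demo)])  # [0.82]
```

In practice the threshold and token budget would come from the retrieval configuration rather than hard-coded defaults.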
Python · 42 lines · 1.4 KiB
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.language_models import FakeListChatModel

from quivr_core import Brain
from quivr_core.rag.entities.config import LLMEndpointConfig
from quivr_core.llm.llm_endpoint import LLMEndpoint

from rich.console import Console
from rich.panel import Panel
from rich.prompt import Prompt


if __name__ == "__main__":
    brain = Brain.from_files(
        name="test_brain",
        file_paths=["tests/processor/data/dummy.pdf"],
        llm=LLMEndpoint(
            llm=FakeListChatModel(responses=["good"]),
            llm_config=LLMEndpointConfig(model="fake_model", llm_base_url="local"),
        ),
        embedder=DeterministicFakeEmbedding(size=20),
    )

    # Check brain info
    brain.print_info()

    console = Console()
    console.print(Panel.fit("Ask your brain !", style="bold magenta"))

    while True:
        # Get user input
        question = Prompt.ask("[bold cyan]Question[/bold cyan]")

        # Check if the user wants to exit
        if question.lower() == "exit":
            console.print(Panel("Goodbye!", style="bold yellow"))
            break

        answer = brain.ask(question)
        # Print the answer
        console.print(f"[bold green]Quivr Assistant[/bold green]: {answer.answer}")

        console.print("-" * console.width)

    brain.print_info()
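The script above wires in FakeListChatModel and DeterministicFakeEmbedding so it runs fully offline. To point the same Brain at a real model, the fakes can be swapped for LangChain's OpenAI integrations. The snippet below is a minimal sketch assuming that an OPENAI_API_KEY is set in the environment, that LLMEndpointConfig accepts the model name on its own (additional fields such as llm_base_url may be required in practice), and that the model names shown are only examples.

```python
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

from quivr_core import Brain
from quivr_core.rag.entities.config import LLMEndpointConfig
from quivr_core.llm.llm_endpoint import LLMEndpoint

# Sketch: same Brain.from_files call as above, but backed by real models.
# Assumes OPENAI_API_KEY is exported; model names are only examples.
brain = Brain.from_files(
    name="my_brain",
    file_paths=["tests/processor/data/dummy.pdf"],
    llm=LLMEndpoint(
        llm=ChatOpenAI(model="gpt-4o-mini"),
        llm_config=LLMEndpointConfig(model="gpt-4o-mini"),
    ),
    embedder=OpenAIEmbeddings(model="text-embedding-3-small"),
)

brain.print_info()
print(brain.ask("What is this document about?").answer)
```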