Mirror of https://github.com/QuivrHQ/quivr.git (synced 2024-12-15 09:32:22 +03:00)

Commit 481f24f5be
# Description

- Introduce the `LLMEndpoint` class: a wrapper around a `BaseChatLLM` that loads OpenAI-compatible models
- Add a `brain.search(...)` function for retrieval
- Test with test coverage:
  - Added the ability to load a brain from `langchain.Document` objects
  - Configured mypy and the poetry lock check in `.pre-commit.yaml`

# Test coverage

![image](https://github.com/QuivrHQ/quivr/assets/14312141/629ede66-146e-400f-b40b-8c22a9258a47)

---------

Co-authored-by: aminediro <aminedirhoussi@gmail.com>
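To make the description concrete, here is a minimal sketch of how the pieces named in this PR might be wired together. Only `LLMEndpointConfig` and its fields are confirmed by the tests in this file; the `quivr_core.llm` import path, a `LLMEndpoint.from_config` constructor, a `Brain.from_langchain_documents` classmethod, and the `brain.search(...)` signature are assumptions for illustration, not taken from this page.

```python
from langchain_core.documents import Document

from quivr_core.config import LLMEndpointConfig

# Assumed import paths for the classes the PR introduces; not confirmed here.
from quivr_core import Brain            # assumed
from quivr_core.llm import LLMEndpoint  # assumed

# LLMEndpointConfig and its fields are confirmed by the tests in this file;
# the values are illustrative.
config = LLMEndpointConfig(model="gpt-3.5-turbo-0125", max_tokens=2000)

# The PR describes LLMEndpoint as a wrapper around a BaseChatLLM that loads
# OpenAI-compatible models; a `from_config` constructor is assumed.
llm = LLMEndpoint.from_config(config)

# "Load brain from langchain Document": the classmethod name and its keyword
# arguments are assumptions for illustration.
docs = [Document(page_content="Quivr is a RAG library.")]
brain = Brain.from_langchain_documents(name="demo", langchain_documents=docs, llm=llm)

# brain.search(...) is named in the PR; the exact signature (and whether it is
# async) is not shown in this excerpt.
results = brain.search("What is Quivr?")
```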
23 lines · 511 B · Python
from quivr_core.config import LLMEndpointConfig, RAGConfig


def test_default_llm_config():
    config = LLMEndpointConfig()

    assert config == LLMEndpointConfig(
        model="gpt-3.5-turbo-0125",
        llm_base_url=None,
        llm_api_key=None,
        max_input=2000,
        max_tokens=2000,
        streaming=True,
    )


def test_default_ragconfig():
    config = RAGConfig()

    assert config.max_files == 20
    assert config.prompt is None
    assert config.llm_config == LLMEndpointConfig()
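The tests above pin the library defaults (`gpt-3.5-turbo-0125`, 2000-token input and output limits, streaming enabled, 20 files, no prompt). For reference, a short sketch of overriding those defaults, for example to point at a self-hosted OpenAI-compatible server, follows. Only fields asserted by the tests are used; the URL, key, prompt, and model name are placeholders, and passing `RAGConfig` fields as keyword arguments is assumed to work the same way it does for `LLMEndpointConfig` above.

```python
from quivr_core.config import LLMEndpointConfig, RAGConfig

# Only fields asserted in the tests above are set; all values are placeholders.
llm_config = LLMEndpointConfig(
    model="mistral-7b-instruct",              # any OpenAI-compatible model name
    llm_base_url="http://localhost:8000/v1",  # e.g. a local OpenAI-compatible server
    llm_api_key="not-a-real-key",
    max_input=4000,
    max_tokens=1000,
    streaming=False,
)

rag_config = RAGConfig(
    max_files=50,
    prompt="Answer using only the retrieved context.",
    llm_config=llm_config,
)

assert rag_config.llm_config.llm_base_url == "http://localhost:8000/v1"
```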