quivr/backend/core/tests/test_llm_endpoint.py
AmineDiro 481f24f5be
feat: quivr core minimal chat (#2818)
# Description
- Introduce the `LLMEndpoint` class: a wrapper around a `BaseChatModel` that loads
OpenAI-compatible models (see the usage sketch after this list)
- Add a `brain.search(...)` function to retrieve documents relevant to a query
- Tested, with coverage reported below
- Added the ability to load a brain from `langchain` `Document` objects
- Configured `mypy` and `poetry lock` hooks in `.pre-commit.yaml`
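
A minimal usage sketch of the new `LLMEndpoint` API, inferred from the tests in this file; the API key is a placeholder, and any OpenAI-compatible endpoint should work the same way:

```python
# Minimal sketch, based on the tests below. "sk-..." is a placeholder key.
from quivr_core.config import LLMEndpointConfig
from quivr_core.llm import LLMEndpoint

config = LLMEndpointConfig(llm_api_key="sk-...")
llm = LLMEndpoint.from_config(config=config)

# The default config resolves to an OpenAI chat model, which supports
# function calling.
assert llm.supports_func_calling()
```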

# Test coverage

![image](https://github.com/QuivrHQ/quivr/assets/14312141/629ede66-146e-400f-b40b-8c22a9258a47)

---------

Co-authored-by: aminediro <aminedirhoussi@gmail.com>
2024-07-09 17:55:14 +02:00


import os

import pytest
from langchain_core.language_models import FakeListChatModel
from langchain_openai import ChatOpenAI
from pydantic.v1.error_wrappers import ValidationError

from quivr_core.config import LLMEndpointConfig
from quivr_core.llm import LLMEndpoint


def test_llm_endpoint_from_config_default():
    # Use pop() instead of del so the test doesn't raise a KeyError
    # when OPENAI_API_KEY is not set in the environment.
    os.environ.pop("OPENAI_API_KEY", None)

    # Without an API key, the default config should fail validation.
    with pytest.raises(ValidationError):
        LLMEndpoint.from_config(LLMEndpointConfig())

    # Working default
    config = LLMEndpointConfig(llm_api_key="test")
    llm = LLMEndpoint.from_config(config=config)

    assert llm.supports_func_calling()
    assert isinstance(llm._llm, ChatOpenAI)
    assert llm._llm.model_name in llm.get_config().model


def test_llm_endpoint_constructor():
    # Wrapping a non-OpenAI chat model should report no function-calling support.
    fake_llm = FakeListChatModel(responses=[])
    llm_endpoint = LLMEndpoint(
        llm=fake_llm, llm_config=LLMEndpointConfig(model="test")
    )
    assert not llm_endpoint.supports_func_calling()
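
Note that `test_llm_endpoint_from_config_default` mutates the process environment, so `OPENAI_API_KEY` stays unset for any test that runs after it in the same session. A sketch of one way to isolate this with pytest's built-in `monkeypatch` fixture; the fixture and test names here are hypothetical, not part of the actual file:

```python
import pytest
from pydantic.v1.error_wrappers import ValidationError

from quivr_core.config import LLMEndpointConfig
from quivr_core.llm import LLMEndpoint


@pytest.fixture
def no_openai_key(monkeypatch: pytest.MonkeyPatch) -> None:
    # monkeypatch restores the original value on teardown, so later tests
    # still see OPENAI_API_KEY if it was set before this test ran.
    monkeypatch.delenv("OPENAI_API_KEY", raising=False)


def test_default_config_requires_key(no_openai_key) -> None:
    with pytest.raises(ValidationError):
        LLMEndpoint.from_config(LLMEndpointConfig())
```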