feat: composite brains get (#1837)

# Description

Please include a summary of the changes and the related issue. Please
also include relevant motivation and context.

## Checklist before requesting a review

Please delete options that are not relevant.

- [ ] My code follows the style guidelines of this project
- [ ] I have performed a self-review of my code
- [ ] I have commented hard-to-understand areas
- [ ] Ideally, I have added tests that prove my fix is effective or that
my feature works
- [ ] New and existing unit tests pass locally with my changes
- [ ] Any dependent changes have been merged

## Screenshots (if appropriate):
This commit is contained in:
Zineb El Bachiri 2023-12-06 15:44:36 +01:00 committed by GitHub
parent ab58f95d48
commit 90c4a44525
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 15 additions and 13 deletions

View File

@ -1,4 +1,4 @@
from .qa_base import QABaseBrainPicking
from .knowledge_brain_qa import KnowledgeBrainQA
from .qa_headless import HeadlessQA
__all__ = ["QABaseBrainPicking", "HeadlessQA"]
__all__ = ["KnowledgeBrainQA", "HeadlessQA"]

View File

@ -4,7 +4,7 @@ from uuid import UUID
from fastapi import HTTPException
from litellm import completion
from llm.qa_base import QABaseBrainPicking
from llm.knowledge_brain_qa import KnowledgeBrainQA
from llm.utils.call_brain_api import call_brain_api
from llm.utils.get_api_brain_definition_as_json_schema import (
get_api_brain_definition_as_json_schema,
@ -23,7 +23,7 @@ logger = get_logger(__name__)
class APIBrainQA(
QABaseBrainPicking,
KnowledgeBrainQA,
):
user_id: UUID

View File

@ -39,7 +39,7 @@ brain_service = BrainService()
chat_service = ChatService()
class QABaseBrainPicking(BaseModel):
class KnowledgeBrainQA(BaseModel):
"""
Main class for the Brain Picking functionality.
It allows to initialize a Chat model, generate questions and retrieve answers using ConversationalRetrievalChain.

View File

@ -21,8 +21,8 @@ class CompositeBrainsConnections(CompositeBrainsConnectionsInterface):
self.db.table("composite_brain_connections")
.insert(
{
"composite_brain_id": composite_brain_id,
"connected_brain_id": connected_brain_id,
"composite_brain_id": str(composite_brain_id),
"connected_brain_id": str(connected_brain_id),
}
)
.execute()

View File

@ -218,7 +218,6 @@ class BrainService:
def get_brain_details(self, brain_id: UUID) -> BrainEntity | None:
brain = self.brain_repository.get_brain_details(brain_id)
# id ?
if brain == None:
return None
@ -228,6 +227,12 @@ class BrainService:
)
brain.brain_definition = brain_definition
if brain.brain_type == BrainType.COMPOSITE:
brain.connected_brains_ids = (
self.composite_brains_connections_repository.get_connected_brains(
brain_id
)
)
return brain
def get_public_brains(self) -> list[PublicBrain]:

View File

@ -1,6 +1,6 @@
from fastapi import HTTPException
from llm.api_brain_qa import APIBrainQA
from llm.qa_base import QABaseBrainPicking
from llm.knowledge_brain_qa import KnowledgeBrainQA
from modules.brain.entity.brain_entity import BrainType, RoleEnum
from modules.brain.service.brain_authorization_service import (
validate_brain_authorization,
@ -49,7 +49,7 @@ class BrainfulChat(ChatInterface):
brain.brain_type == BrainType.DOC
or model not in models_supporting_function_calls
):
return QABaseBrainPicking(
return KnowledgeBrainQA(
chat_id=chat_id,
model=model,
max_tokens=max_tokens,

View File

@ -4,8 +4,6 @@ from venv import logger
from fastapi import APIRouter, Depends, HTTPException, Query, Request
from fastapi.responses import StreamingResponse
from llm.qa_base import QABaseBrainPicking
from llm.qa_headless import HeadlessQA
from middlewares.auth import AuthBearer, get_current_user
from models.user_usage import UserUsage
from modules.brain.service.brain_service import BrainService
@ -231,7 +229,6 @@ async def create_stream_question_handler(
try:
logger.info(f"Streaming request for {chat_question.model}")
check_user_requests_limit(current_user)
gpt_answer_generator: HeadlessQA | QABaseBrainPicking
# TODO check if model is in the list of models available for the user
is_model_ok = chat_question.model in user_settings.get("models", ["gpt-3.5-turbo"]) # type: ignore