chore(refactor): removed unused code

This commit is contained in:
Stan Girard 2023-06-20 00:14:40 +02:00
parent 99258790ad
commit 533446a2b4
2 changed files with 1 additions and 41 deletions

View File

@@ -4,6 +4,7 @@ from uuid import UUID
from auth.auth_bearer import AuthBearer, get_current_user
from fastapi import APIRouter, Depends, Request
from llm.brainpicking import BrainPicking
from models.chats import ChatMessage
from models.settings import CommonsDep, common_dependencies
from models.users import User
@@ -11,8 +12,6 @@ from utils.chats import (create_chat, get_chat_name_from_first_question,
update_chat)
from utils.users import (create_user, fetch_user_id_from_credentials,
update_user_request_count)
from utils.vectors import get_answer
from llm.brainpicking import BrainPicking
chat_router = APIRouter()

View File

@@ -50,42 +50,3 @@ def create_summary(commons: CommonsDep, document_id, content, metadata):
if sids and len(sids) > 0:
commons['supabase'].table("summaries").update(
{"document_id": document_id}).match({"id": sids[0]}).execute()
def get_answer(commons: CommonsDep, chat_message: ChatMessage, email: str, user_openai_api_key: str) -> str:
    """Answer *chat_message* for the user identified by *email* via a BrainPicking QA chain.

    Args:
        commons: Shared dependencies container (currently unused here; kept for
            signature compatibility with callers).
        chat_message: Incoming message; provides ``model``, ``question`` and
            ``history`` (assumed to be alternating (role, text) pairs — TODO confirm).
        email: User identifier passed to ``BrainPicking.init``.
        user_openai_api_key: Per-user OpenAI key forwarded to the QA chain.

    Returns:
        The chain's answer string, with a "Ref: ..." suffix listing source file
        names when the chain reports source documents.

    NOTE(review): the original body left all answer-producing code commented
    out and returned an unbound ``answer`` (guaranteed NameError). This
    restores the non-summarization path from that commented code.
    """
    brain = BrainPicking().init(chat_message.model, email)
    qa = brain.get_qa(chat_message, user_openai_api_key)

    # Fold the flat alternating history [(role, user_text), (role, assistant_text), ...]
    # into (user_text, assistant_text) pairs as the chain's chat_history expects.
    transformed_history = []
    for i in range(0, len(chat_message.history) - 1, 2):
        user_message = chat_message.history[i][1]
        assistant_message = chat_message.history[i + 1][1]
        transformed_history.append((user_message, assistant_message))

    model_response = qa(
        {"question": chat_message.question, "chat_history": transformed_history}
    )
    answer = model_response["answer"]

    # The commented-out original tested `"source_documents" in answer`, a substring
    # check on a string — presumably it meant the response dict; verify against the
    # chain's return_source_documents setting.
    sources = [
        doc.metadata["file_name"]
        for doc in model_response.get("source_documents", [])
        if "file_name" in doc.metadata
    ]
    if sources:
        # dict.fromkeys de-duplicates while preserving first-seen order.
        answer = answer + "\n\nRef: " + "; ".join(dict.fromkeys(sources))
    return answer