quivr/frontend/app/chat/[chatId]/hooks/useChat.ts
Matt 6f047f4a39
feat: streaming for standard brain picking (#385)
* feat: streaming for standard brain picking

* fix(bug): private llm

* wip: test

Co-authored-by: Mamadou DICKO <mamadoudicko@users.noreply.github.com>

* wip: almost good

Co-authored-by: Mamadou DICKO <mamadoudicko@users.noreply.github.com>

* feat: useFetch

* chore: remove 💀

* chore: fix linting

* fix: forward the request if not streaming

* feat: streaming for standard brain picking

* fix(bug): private llm

* wip: test

Co-authored-by: Mamadou DICKO <mamadoudicko@users.noreply.github.com>

* wip: almost good

Co-authored-by: Mamadou DICKO <mamadoudicko@users.noreply.github.com>

* feat: useFetch

* chore: remove 💀

* chore: fix linting

* fix: forward the request if not streaming

* fix: 💀 code

* fix: check_user_limit

* feat: brain_id to new chat stream

* fix: missing imports

* feat: message_id created on backend

Co-authored-by: Mamadou DICKO <mamadoudicko@users.noreply.github.com>

* chore: remove dead

* remove: cpython

* remove: dead

---------

Co-authored-by: Mamadou DICKO <mamadoudicko@users.noreply.github.com>
2023-06-30 10:10:59 +02:00

109 lines
2.9 KiB
TypeScript

/* eslint-disable max-lines */
import { AxiosError } from "axios";
import { useParams } from "next/navigation";
import { useEffect, useState } from "react";
import { useBrainConfig } from "@/lib/context/BrainConfigProvider/hooks/useBrainConfig";
import { useToast } from "@/lib/hooks";
import { useEventTracking } from "@/services/analytics/useEventTracking";
import { useChatService } from "./useChatService";
import { useChatContext } from "../context/ChatContext";
import { ChatQuestion } from "../types";
// eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types
export const useChat = () => {
  const { track } = useEventTracking();
  const params = useParams();
  // chatId comes from the route when present; otherwise it is created
  // lazily on the first question (see addQuestion below).
  const [chatId, setChatId] = useState<string | undefined>(
    params?.chatId as string | undefined
  );
  const [generatingAnswer, setGeneratingAnswer] = useState(false);
  const {
    config: { maxTokens, model, temperature },
  } = useBrainConfig();
  const { history, setHistory } = useChatContext();
  const { publish } = useToast();
  const {
    createChat,
    getChatHistory,
    addStreamQuestion,
    addQuestion: addQuestionToModel,
  } = useChatService();

  useEffect(() => {
    // Guard against a stale response overwriting newer state. The previous
    // code compared chatId against a copy taken in the same closure, which
    // is always equal and therefore never guarded anything. A cancellation
    // flag flipped in the effect cleanup reliably drops responses that
    // arrive after chatId has changed or the component unmounted.
    let cancelled = false;
    const fetchHistory = async () => {
      const chatHistory = await getChatHistory(chatId);
      if (!cancelled && chatHistory.length > 0) {
        setHistory(chatHistory);
      }
    };
    // Handle rejections explicitly instead of leaving an unhandled promise.
    fetchHistory().catch((error) => console.error({ error }));
    return () => {
      cancelled = true;
    };
  }, [chatId, getChatHistory, setHistory]);

  // Creates a chat on the backend named after the question and returns its id.
  const generateNewChatIdFromName = async (
    chatName: string
  ): Promise<string> => {
    const chat = await createChat({ name: chatName });
    return chat.chat_id;
  };

  /**
   * Sends a question to the current chat, creating the chat on the fly if
   * none exists yet. Streams the answer for gpt-3.5-turbo, otherwise uses
   * the non-streaming endpoint. Publishes a toast on failure (with a
   * dedicated message for HTTP 429 rate limiting).
   *
   * @param question - the user's question text
   * @param callback - invoked after the answer request completes successfully
   */
  const addQuestion = async (question: string, callback?: () => void) => {
    const chatQuestion: ChatQuestion = {
      model,
      question,
      temperature,
      max_tokens: maxTokens,
    };
    try {
      void track("QUESTION_ASKED");
      setGeneratingAnswer(true);

      const currentChatId =
        chatId ??
        // if chatId is undefined, we need to create a new chat on the fly,
        // named after the first three words of the question
        (await generateNewChatIdFromName(
          question.split(" ").slice(0, 3).join(" ")
        ));
      setChatId(currentChatId);

      // Only gpt-3.5-turbo supports the streaming endpoint here.
      if (chatQuestion.model === "gpt-3.5-turbo") {
        await addStreamQuestion(currentChatId, chatQuestion);
      } else {
        await addQuestionToModel(currentChatId, chatQuestion);
      }

      callback?.();
    } catch (error) {
      console.error({ error });

      if ((error as AxiosError).response?.status === 429) {
        publish({
          variant: "danger",
          text: "You have reached the limit of requests, please try again later",
        });

        return;
      }

      publish({
        variant: "danger",
        text: "Error occurred while getting answer",
      });
    } finally {
      setGeneratingAnswer(false);
    }
  };

  return {
    history,
    addQuestion,
    generatingAnswer,
  };
};