Mirror of https://github.com/QuivrHQ/quivr.git, synced 2024-12-18 11:51:41 +03:00
59fe7b089b
* feat(chat): use openai function for answer (backend)
* feat(chat): use openai function for answer (frontend)
* chore: refactor BrainPicking
* feat: update chat creation logic
* feat: simplify chat system logic
* feat: set default method to gpt-3.5-turbo-0613
* feat: use user own openai key
* feat(chat): slightly improve prompts
* feat: add global error interceptor
* feat: remove unused endpoints
* docs: update chat system doc
* chore(linter): add unused import remove config
* feat: improve dx
* feat: improve OpenAiFunctionBasedAnswerGenerator prompt
63 lines
1.6 KiB
TypeScript
/* eslint-disable */
import { useEffect, useState } from "react";

import { isSpeechRecognitionSupported } from "@/lib/helpers/isSpeechRecognitionSupported";

type useSpeechProps = {
  setMessage: (newValue: string | ((prevValue: string) => string)) => void;
};

export const useSpeech = ({ setMessage }: useSpeechProps) => {
  const [isListening, setIsListening] = useState(false);
  const [speechSupported, setSpeechSupported] = useState(false);

  useEffect(() => {
    if (isSpeechRecognitionSupported()) {
      setSpeechSupported(true);

      // Prefer the standard constructor, falling back to the WebKit-prefixed one.
      const SpeechRecognition =
        window.SpeechRecognition || window.webkitSpeechRecognition;

      const mic = new SpeechRecognition();

      mic.continuous = true; // keep recognizing across pauses in speech
      mic.interimResults = false; // deliver only final transcripts
      mic.lang = "en-US";

      mic.onstart = () => {
        console.log("Mics on");
      };

      mic.onend = () => {
        console.log("Mics off");
      };

      mic.onerror = (event: SpeechRecognitionErrorEvent) => {
        console.log(event.error);
        setIsListening(false);
      };

      mic.onresult = (event: SpeechRecognitionEvent) => {
        // interimResults is false, so this is the final transcript of the
        // latest recognition result.
        const transcript =
          event.results[event.results.length - 1][0].transcript;
        setMessage((prevMessage) => prevMessage + transcript);
      };

      if (isListening) {
        mic.start();
      }

      // Stop recognition when the effect re-runs or the component unmounts.
      return () => {
        mic.stop();
      };
    }
  }, [isListening, setMessage]);

  // Despite its name, this toggles listening on and off; the effect above
  // reacts to the state change by starting or stopping the recognizer.
  const startListening = () => {
    setIsListening((prevIsListening) => !prevIsListening);
  };

  return { startListening, speechSupported, isListening };
};
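// --- Usage sketch (not part of the original file) ---
// A minimal, hypothetical example of wiring this hook into a chat input
// component. The ChatInput name, the import path for useSpeech, and the
// markup below are illustrative assumptions, not code from this repository.
//
// import { useState } from "react";
// import { useSpeech } from "@/lib/hooks/useSpeech";
//
// const ChatInput = () => {
//   const [message, setMessage] = useState("");
//   const { startListening, speechSupported, isListening } = useSpeech({
//     setMessage,
//   });
//
//   return (
//     <div>
//       <textarea
//         value={message}
//         onChange={(event) => setMessage(event.target.value)}
//       />
//       {speechSupported && (
//         <button onClick={startListening}>
//           {isListening ? "Stop dictation" : "Start dictation"}
//         </button>
//       )}
//     </div>
//   );
// };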