fix(openai): user key now used for llm model

commit c01433c841
parent 33481eb985
Author: Stan Girard
Date: 2023-08-01 13:46:53 +02:00
4 changed files with 8 additions and 11 deletions

@@ -152,7 +152,8 @@ class BaseBrainPicking(BaseModel):
         This function should also call: _create_qa, get_chat_history and format_chat_history.
         It should also update the chat_history in the DB.
         """
     async def generate_stream(self, question: str) -> AsyncIterable:
         """
         Generate a streaming answer to a given question using QA Chain.
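
For orientation, the flow this docstring prescribes could look roughly like the sketch below. It is a hypothetical illustration, not the repository's code: the helper names come from the docstring and the imports in this commit, while `self.chat_id`, the `update_chat_history` signature, and the use of `acall`/`aiter` are assumptions.

```python
import asyncio
from typing import AsyncIterable

# Hypothetical sketch of the contract described above; not quivr's actual code.
async def generate_stream(self, question: str) -> AsyncIterable[str]:
    # Load and format prior turns (names taken from the docstring/imports).
    history = format_chat_history(get_chat_history(self.chat_id))
    qa = self._create_qa()  # QA chain wired to a streaming LLM

    # Run the chain in the background while draining the streaming callback.
    task = asyncio.create_task(
        qa.acall({"question": question, "chat_history": history})
    )
    tokens = []
    async for token in self.callbacks[0].aiter():  # AsyncIteratorCallbackHandler
        tokens.append(token)
        yield token  # hand each token to the caller as it arrives

    await task
    # Persist the finished exchange (signature assumed for illustration).
    update_chat_history(self.chat_id, question, "".join(tokens))
```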

@@ -46,7 +46,7 @@ class OpenAIBrainPicking(QABaseBrainPicking):
             openai_api_key=self.openai_api_key
         ) # pyright: ignore reportPrivateUsage=none

-    def _create_llm(self, model, streaming=False, callbacks=None) -> BaseLLM:
+    def _create_llm(self, model, temperature=0, streaming=False, callbacks=None) -> BaseLLM:
         """
         Determine the language model to be used.
         :param model: Language model name to be used.
@@ -55,9 +55,10 @@ class OpenAIBrainPicking(QABaseBrainPicking):
         :return: Language model instance
         """
         return ChatOpenAI(
-            temperature=self.temperature,
+            temperature=temperature,
             model=model,
             streaming=streaming,
             verbose=True,
             callbacks=callbacks,
+            openai_api_key=self.openai_api_key,
         ) # pyright: ignore reportPrivateUsage=none
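
This hunk is the substance of the fix: `_create_llm` now threads the user's `openai_api_key` into `ChatOpenAI` and takes `temperature` as an explicit argument instead of always reading `self.temperature`. A minimal usage sketch against the new signature (the `brain` instance and argument values are illustrative, not from the commit):

```python
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler

# Illustrative calls; values are examples only.
streaming_model = brain._create_llm(
    model="gpt-3.5-turbo",                       # forwarded to ChatOpenAI(model=...)
    streaming=True,                              # token-by-token output
    callbacks=[AsyncIteratorCallbackHandler()],  # drained by generate_stream
)
condense_llm = brain._create_llm(model="gpt-3.5-turbo", temperature=0)

# Both instances now authenticate with the *user's* key, because _create_llm
# always passes openai_api_key=self.openai_api_key to ChatOpenAI.
```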

@@ -13,7 +13,6 @@ from repository.chat.get_chat_history import get_chat_history
 from repository.chat.update_chat_history import update_chat_history
 from supabase.client import Client, create_client
 from vectorstore.supabase import CustomSupabaseVectorStore
-from langchain.chat_models import ChatOpenAI
 from repository.chat.update_message_by_id import update_message_by_id
 import json
@@ -175,12 +174,8 @@ class QABaseBrainPicking(BaseBrainPicking):
         callback = self.callbacks[0]
         callback = AsyncIteratorCallbackHandler()
         self.callbacks = [callback]
-        model = ChatOpenAI(
-            streaming=True,
-            verbose=True,
-            callbacks=[callback],
-        )
-        llm = ChatOpenAI(temperature=0)
+        model = self._create_llm(model=self.model, streaming=True, callbacks=self.callbacks)
+        llm = self._create_llm(model=self.model, temperature=self.temperature)
         question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
         doc_chain = load_qa_chain(model, chain_type="stuff")
         qa = ConversationalRetrievalChain(
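
Note the design split this refactor preserves: the non-streaming `llm` condenses the chat history plus follow-up into a standalone question, while the streaming `model` produces the user-visible answer over the retrieved documents; both are now built through `_create_llm`, so both carry the user's API key. The hunk is cut off at the `ConversationalRetrievalChain(` constructor; in the langchain 0.0.x API of this era it would typically be completed roughly as below (the retriever source is an assumption, the other arguments follow from the diff):

```python
# Hypothetical completion of the truncated constructor (langchain 0.0.x API);
# the retriever source is assumed for illustration.
qa = ConversationalRetrievalChain(
    retriever=vector_store.as_retriever(),  # assumed: the CustomSupabaseVectorStore above
    question_generator=question_generator,  # non-streaming llm rewrites the question
    combine_docs_chain=doc_chain,           # streaming model answers over the docs
)
```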

@@ -1,4 +1,4 @@
 export const GITHUB_URL = "https://github.com/stangirard/quivr";
 export const TWITTER_URL = "https://twitter.com/quivr_brain";
 export const DISCORD_URL = "https://discord.gg/HUpRgp2HG8";
-export const DEFAULT_BACKEND_URL = "http://localhost:5000";
+export const DEFAULT_BACKEND_URL = "http://localhost:5050";