fix: Update import statements for OllamaEmbeddings (#2584)

This pull request fixes the import statements for OllamaEmbeddings in
multiple files. The imports now use the current module path
"langchain_community.embeddings" instead of the old
"langchain.embeddings.ollama", so the affected modules load and run
without import errors.
Stan Girard 2024-05-11 20:50:13 +02:00 committed by GitHub
parent a1b74d00f5
commit 3086891cb7
4 changed files with 9 additions and 9 deletions
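
For reference, a minimal sketch of the corrected import in use (the Ollama server URL and model name below are illustrative defaults, not part of this PR):

```python
# Updated import path: OllamaEmbeddings now lives in langchain_community
from langchain_community.embeddings import OllamaEmbeddings

# Assumes a local Ollama server on its default port; the model name is illustrative
embeddings = OllamaEmbeddings(base_url="http://localhost:11434", model="llama2")
vector = embeddings.embed_query("What is Quivr?")
print(len(vector))  # length of the embedding vector
```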

View File

@@ -1,7 +1,7 @@
 from typing import Optional
 from uuid import UUID
-from langchain.embeddings.ollama import OllamaEmbeddings
+from langchain_community.embeddings import OllamaEmbeddings
 from langchain_openai import OpenAIEmbeddings
 from logger import get_logger
 from models.databases.supabase.supabase import SupabaseDB

View File

@@ -51,7 +51,7 @@ class GPT4Brain(KnowledgeBrainQA):
     tools: Optional[List[BaseTool]] = None
     tool_executor: Optional[ToolExecutor] = None
-    model_function: ChatOpenAI = None
+    function_model: ChatOpenAI = None
     def __init__(
         self,
@@ -90,7 +90,7 @@ class GPT4Brain(KnowledgeBrainQA):
     # Define the function that calls the model
     def call_model(self, state):
         messages = state["messages"]
-        response = self.model_function.invoke(messages)
+        response = self.function_model.invoke(messages)
         # We return a list, because this will get added to the existing list
         return {"messages": [response]}
@@ -166,11 +166,11 @@ class GPT4Brain(KnowledgeBrainQA):
         return app
     def get_chain(self):
-        self.model_function = ChatOpenAI(
+        self.function_model = ChatOpenAI(
             model="gpt-4-turbo", temperature=0, streaming=True
         )
-        self.model_function = self.model_function.bind_tools(self.tools)
+        self.function_model = self.function_model.bind_tools(self.tools)
         graph = self.create_graph()
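
As an aside, the bind_tools pattern touched in the hunk above can be tried standalone; this is only a sketch of the pattern, with a hypothetical toy tool that is not part of this PR:

```python
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def word_count(text: str) -> int:
    """Count the words in a piece of text (toy tool, for illustration only)."""
    return len(text.split())


# Same shape as get_chain above: build the chat model, then bind the tools to it
function_model = ChatOpenAI(model="gpt-4-turbo", temperature=0, streaming=True)
function_model = function_model.bind_tools([word_count])

# The result is an AIMessage; any tool invocations the model requests show up in tool_calls
response = function_model.invoke("How many words are in 'the quick brown fox'?")
print(response.tool_calls)
```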

View File

@@ -4,7 +4,7 @@ from typing import List, Optional
 from uuid import UUID
 from langchain.chains import ConversationalRetrievalChain
-from langchain.embeddings.ollama import OllamaEmbeddings
+from langchain_community.embeddings import OllamaEmbeddings
 from langchain.llms.base import BaseLLM
 from langchain.prompts import HumanMessagePromptTemplate, SystemMessagePromptTemplate
 from langchain.retrievers import ContextualCompressionRetriever

View File

@@ -3,7 +3,7 @@ from uuid import UUID
 from fastapi import APIRouter, Depends, HTTPException, Query, Request
 from fastapi.responses import StreamingResponse
-from langchain.embeddings.ollama import OllamaEmbeddings
+from langchain_community.embeddings import OllamaEmbeddings
 from langchain_openai import OpenAIEmbeddings
 from logger import get_logger
 from middlewares.auth import AuthBearer, get_current_user