mirror of https://github.com/StanGirard/quivr.git
fix: Update import statements for OllamaEmbeddings (#2584)
This pull request fixes the import statements for OllamaEmbeddings in multiple files. The imports are updated to use the correct package, langchain_community.embeddings, instead of langchain.embeddings.ollama, so the code runs without import errors.
parent a1b74d00f5
commit 3086891cb7
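For reference, a minimal sketch of the corrected import and a typical instantiation follows; the model name and base URL are illustrative defaults, not values taken from this diff:

# Updated import (previously: from langchain.embeddings.ollama import OllamaEmbeddings)
from langchain_community.embeddings import OllamaEmbeddings

# Hypothetical usage; point model and base_url at your local Ollama instance.
embeddings = OllamaEmbeddings(model="llama2", base_url="http://localhost:11434")
vector = embeddings.embed_query("hello world")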
@@ -1,7 +1,7 @@
 from typing import Optional
 from uuid import UUID
 
-from langchain.embeddings.ollama import OllamaEmbeddings
+from langchain_community.embeddings import OllamaEmbeddings
 from langchain_openai import OpenAIEmbeddings
 from logger import get_logger
 from models.databases.supabase.supabase import SupabaseDB
@@ -35,13 +35,13 @@ chat_service = ChatService()
 class GPT4Brain(KnowledgeBrainQA):
     """
     GPT4Brain integrates with GPT-4 to provide real-time answers and supports various tools to enhance its capabilities.
 
     Available Tools:
     - WebSearchTool: Performs web searches to find relevant information.
     - ImageGeneratorTool: Generates images based on textual descriptions.
     - URLReaderTool: Reads and summarizes content from URLs.
     - EmailSenderTool: Sends emails with specified content.
 
     Use Cases:
     - WebSearchTool can be used to find the latest news articles on a specific topic or to gather information from various websites.
     - ImageGeneratorTool is useful for creating visual content based on textual prompts, such as generating a company logo based on a description.
@@ -51,7 +51,7 @@ class GPT4Brain(KnowledgeBrainQA):
 
     tools: Optional[List[BaseTool]] = None
     tool_executor: Optional[ToolExecutor] = None
-    model_function: ChatOpenAI = None
+    function_model: ChatOpenAI = None
 
     def __init__(
         self,
@@ -90,7 +90,7 @@ class GPT4Brain(KnowledgeBrainQA):
     # Define the function that calls the model
     def call_model(self, state):
         messages = state["messages"]
-        response = self.model_function.invoke(messages)
+        response = self.function_model.invoke(messages)
         # We return a list, because this will get added to the existing list
         return {"messages": [response]}
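A minimal sketch (not part of this diff) of how a call_model node like the one above can be wired into a LangGraph state graph, assuming langgraph and langchain_openai are installed; the state class, node name, and model settings are illustrative:

from typing import Annotated, TypedDict

from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph
from langgraph.graph.message import add_messages

class AgentState(TypedDict):
    # Conversation history; add_messages appends each returned message to the list.
    messages: Annotated[list, add_messages]

function_model = ChatOpenAI(model="gpt-4-turbo", temperature=0, streaming=True)

def call_model(state: AgentState):
    # Invoke the model on the accumulated messages and return its reply for appending.
    response = function_model.invoke(state["messages"])
    return {"messages": [response]}

workflow = StateGraph(AgentState)
workflow.add_node("agent", call_model)
workflow.set_entry_point("agent")
workflow.add_edge("agent", END)
app = workflow.compile()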
@@ -166,11 +166,11 @@ class GPT4Brain(KnowledgeBrainQA):
         return app
 
     def get_chain(self):
-        self.model_function = ChatOpenAI(
+        self.function_model = ChatOpenAI(
             model="gpt-4-turbo", temperature=0, streaming=True
         )
 
-        self.model_function = self.model_function.bind_tools(self.tools)
+        self.function_model = self.function_model.bind_tools(self.tools)
 
         graph = self.create_graph()
 
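Similarly, a minimal sketch of the bind_tools pattern used in get_chain above, assuming langchain_openai and langchain_community (with the duckduckgo-search extra) are installed; the tool choice here is illustrative, not taken from the diff:

from langchain_community.tools import DuckDuckGoSearchRun
from langchain_openai import ChatOpenAI

# Bind tools so the model can emit structured tool calls in its responses.
function_model = ChatOpenAI(model="gpt-4-turbo", temperature=0, streaming=True)
function_model = function_model.bind_tools([DuckDuckGoSearchRun()])

response = function_model.invoke("Find recent coverage of open-source LLMs.")
print(response.tool_calls)  # populated when the model chooses to call a tool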
@@ -4,7 +4,7 @@ from typing import List, Optional
 from uuid import UUID
 
 from langchain.chains import ConversationalRetrievalChain
-from langchain.embeddings.ollama import OllamaEmbeddings
+from langchain_community.embeddings import OllamaEmbeddings
 from langchain.llms.base import BaseLLM
 from langchain.prompts import HumanMessagePromptTemplate, SystemMessagePromptTemplate
 from langchain.retrievers import ContextualCompressionRetriever
@@ -3,7 +3,7 @@ from uuid import UUID
 
 from fastapi import APIRouter, Depends, HTTPException, Query, Request
 from fastapi.responses import StreamingResponse
-from langchain.embeddings.ollama import OllamaEmbeddings
+from langchain_community.embeddings import OllamaEmbeddings
 from langchain_openai import OpenAIEmbeddings
 from logger import get_logger
 from middlewares.auth import AuthBearer, get_current_user