From 7e9786fccfcd7a526681426f030c038adbaf913f Mon Sep 17 00:00:00 2001
From: Jared Van Bortel
Date: Thu, 11 Jan 2024 12:02:39 -0500
Subject: [PATCH] chat: set search path early

This fixes the issues with installed versions of v2.6.0.
---
 gpt4all-backend/llmodel.cpp |  6 +++---
 gpt4all-backend/llmodel.h   |  2 +-
 gpt4all-chat/llm.cpp        | 23 ++++-------------------
 gpt4all-chat/llm.h          |  4 ++--
 gpt4all-chat/main.cpp       | 19 +++++++++++++++++--
 5 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/gpt4all-backend/llmodel.cpp b/gpt4all-backend/llmodel.cpp
index 2431129f..bc9176f0 100644
--- a/gpt4all-backend/llmodel.cpp
+++ b/gpt4all-backend/llmodel.cpp
@@ -192,7 +192,7 @@ LLModel *LLModel::Implementation::construct(const std::string &modelPath, std::s
     return fres;
 }
 
-LLModel *LLModel::Implementation::constructCpuLlama() {
+LLModel *LLModel::Implementation::constructDefaultLlama() {
     const LLModel::Implementation *impl = nullptr;
     for (const auto &i : implementationList()) {
         if (i.m_buildVariant == "metal" || i.m_modelType != "LLaMA") continue;
@@ -208,8 +208,8 @@ LLModel *LLModel::Implementation::constructCpuLlama() {
 }
 
 std::vector<LLModel::GPUDevice> LLModel::Implementation::availableGPUDevices() {
-    static LLModel *cpuLlama = LLModel::Implementation::constructCpuLlama(); // (memory leak)
-    if (cpuLlama) { return cpuLlama->availableGPUDevices(0); }
+    static LLModel *llama = LLModel::Implementation::constructDefaultLlama(); // (memory leak)
+    if (llama) { return llama->availableGPUDevices(0); }
     return {};
 }
 
diff --git a/gpt4all-backend/llmodel.h b/gpt4all-backend/llmodel.h
index f1551cfb..c0b8bca9 100644
--- a/gpt4all-backend/llmodel.h
+++ b/gpt4all-backend/llmodel.h
@@ -43,7 +43,7 @@ public:
     static const std::string& implementationsSearchPath();
 
 private:
-    static LLModel *constructCpuLlama();
+    static LLModel *constructDefaultLlama();
 
     bool (*m_magicMatch)(const char *fname);
     LLModel *(*m_construct)();
diff --git a/gpt4all-chat/llm.cpp b/gpt4all-chat/llm.cpp
index c83e07f4..75803c22 100644
--- a/gpt4all-chat/llm.cpp
+++ b/gpt4all-chat/llm.cpp
@@ -1,15 +1,14 @@
 #include "llm.h"
 #include "../gpt4all-backend/sysinfo.h"
-#include "../gpt4all-backend/llmodel.h"
-#include "network.h"
 
 #include
+#include
 #include
 #include
 #include
 #include
 #include
-#include
+#include
 #include
 
 class MyLLM: public LLM { };
@@ -23,20 +22,6 @@ LLM::LLM()
     : QObject{nullptr}
     , m_compatHardware(true)
 {
-    QString llmodelSearchPaths = QCoreApplication::applicationDirPath();
-    const QString libDir = QCoreApplication::applicationDirPath() + "/../lib/";
-    if (directoryExists(libDir))
-        llmodelSearchPaths += ";" + libDir;
-#if defined(Q_OS_MAC)
-    const QString binDir = QCoreApplication::applicationDirPath() + "/../../../";
-    if (directoryExists(binDir))
-        llmodelSearchPaths += ";" + binDir;
-    const QString frameworksDir = QCoreApplication::applicationDirPath() + "/../Frameworks/";
-    if (directoryExists(frameworksDir))
-        llmodelSearchPaths += ";" + frameworksDir;
-#endif
-    LLModel::Implementation::setImplementationsSearchPath(llmodelSearchPaths.toStdString());
-
 #if defined(__x86_64__)
 #ifndef _MSC_VER
     const bool minimal(__builtin_cpu_supports("avx"));
@@ -86,7 +71,7 @@ bool LLM::checkForUpdates() const
 #endif
 }
 
-bool LLM::directoryExists(const QString &path) const
+bool LLM::directoryExists(const QString &path)
 {
     const QUrl url(path);
     const QString localFilePath = url.isLocalFile() ? url.toLocalFile() : path;
@@ -94,7 +79,7 @@ bool LLM::directoryExists(const QString &path) const
     return info.exists() && info.isDir();
 }
 
-bool LLM::fileExists(const QString &path) const
+bool LLM::fileExists(const QString &path)
 {
     const QUrl url(path);
     const QString localFilePath = url.isLocalFile() ? url.toLocalFile() : path;
diff --git a/gpt4all-chat/llm.h b/gpt4all-chat/llm.h
index 8a582e3a..067ee671 100644
--- a/gpt4all-chat/llm.h
+++ b/gpt4all-chat/llm.h
@@ -13,8 +13,8 @@ public:
     Q_INVOKABLE bool compatHardware() const { return m_compatHardware; }
     Q_INVOKABLE bool checkForUpdates() const;
 
-    Q_INVOKABLE bool directoryExists(const QString &path) const;
-    Q_INVOKABLE bool fileExists(const QString &path) const;
+    Q_INVOKABLE static bool directoryExists(const QString &path);
+    Q_INVOKABLE static bool fileExists(const QString &path);
 
     Q_INVOKABLE qint64 systemTotalRAMInGB() const;
     Q_INVOKABLE QString systemTotalRAMInGBString() const;
diff --git a/gpt4all-chat/main.cpp b/gpt4all-chat/main.cpp
index 2475f5a0..1b8cb353 100644
--- a/gpt4all-chat/main.cpp
+++ b/gpt4all-chat/main.cpp
@@ -1,9 +1,8 @@
+#include
 #include
 #include
 #include
 
-#include
-
 #include "llm.h"
 #include "modellist.h"
 #include "chatlistmodel.h"
@@ -13,6 +12,7 @@
 #include "mysettings.h"
 #include "config.h"
 #include "logger.h"
+#include "../gpt4all-backend/llmodel.h"
 
 int main(int argc, char *argv[])
 {
@@ -25,6 +25,21 @@ int main(int argc, char *argv[])
 
     QGuiApplication app(argc, argv);
     QQmlApplicationEngine engine;
+
+    QString llmodelSearchPaths = QCoreApplication::applicationDirPath();
+    const QString libDir = QCoreApplication::applicationDirPath() + "/../lib/";
+    if (LLM::directoryExists(libDir))
+        llmodelSearchPaths += ";" + libDir;
+#if defined(Q_OS_MAC)
+    const QString binDir = QCoreApplication::applicationDirPath() + "/../../../";
+    if (LLM::directoryExists(binDir))
+        llmodelSearchPaths += ";" + binDir;
+    const QString frameworksDir = QCoreApplication::applicationDirPath() + "/../Frameworks/";
+    if (LLM::directoryExists(frameworksDir))
+        llmodelSearchPaths += ";" + frameworksDir;
+#endif
+    LLModel::Implementation::setImplementationsSearchPath(llmodelSearchPaths.toStdString());
+
     qmlRegisterSingletonInstance("mysettings", 1, 0, "MySettings", MySettings::globalInstance());
     qmlRegisterSingletonInstance("modellist", 1, 0, "ModelList", ModelList::globalInstance());
     qmlRegisterSingletonInstance("chatlistmodel", 1, 0, "ChatListModel", ChatListModel::globalInstance());