Modellist temp

Adam Treat 2023-06-22 15:44:49 -04:00
parent c1794597a7
commit 7f01b153b3
25 changed files with 1784 additions and 1108 deletions

View File: CMakeLists.txt

@ -71,9 +71,10 @@ qt_add_executable(chat
chatgpt.h chatgpt.cpp
database.h database.cpp
download.h download.cpp
network.h network.cpp
localdocs.h localdocs.cpp localdocsmodel.h localdocsmodel.cpp
llm.h llm.cpp
modellist.h modellist.cpp
network.h network.cpp
server.h server.cpp
logger.h logger.cpp
responsetext.h responsetext.cpp

View File: chat.cpp

@ -1,8 +1,8 @@
#include "chat.h"
#include "chatlistmodel.h"
#include "llm.h"
#include "localdocs.h"
#include "modellist.h"
#include "network.h"
#include "download.h"
#include "server.h"
Chat::Chat(QObject *parent)
@ -45,17 +45,6 @@ Chat::~Chat()
void Chat::connectLLM()
{
const QString exePath = QCoreApplication::applicationDirPath() + QDir::separator();
const QString localPath = Download::globalInstance()->downloadLocalModelsPath();
m_watcher = new QFileSystemWatcher(this);
m_watcher->addPath(exePath);
m_watcher->addPath(localPath);
// Should be in same thread
connect(Download::globalInstance(), &Download::modelListChanged, this, &Chat::handleModelListChanged, Qt::DirectConnection);
connect(this, &Chat::modelNameChanged, this, &Chat::handleModelListChanged, Qt::DirectConnection);
connect(m_watcher, &QFileSystemWatcher::directoryChanged, this, &Chat::handleModelListChanged);
// Should be in different threads
connect(m_llmodel, &ChatLLM::isModelLoadedChanged, this, &Chat::handleModelLoadedChanged, Qt::QueuedConnection);
connect(m_llmodel, &ChatLLM::responseChanged, this, &Chat::handleResponseChanged, Qt::QueuedConnection);
@ -66,24 +55,23 @@ void Chat::connectLLM()
connect(m_llmodel, &ChatLLM::generatedNameChanged, this, &Chat::generatedNameChanged, Qt::QueuedConnection);
connect(m_llmodel, &ChatLLM::reportSpeed, this, &Chat::handleTokenSpeedChanged, Qt::QueuedConnection);
connect(m_llmodel, &ChatLLM::databaseResultsChanged, this, &Chat::handleDatabaseResultsChanged, Qt::QueuedConnection);
connect(m_llmodel, &ChatLLM::modelInfoChanged, this, &Chat::handleModelInfoChanged, Qt::QueuedConnection);
connect(this, &Chat::promptRequested, m_llmodel, &ChatLLM::prompt, Qt::QueuedConnection);
connect(this, &Chat::modelNameChangeRequested, m_llmodel, &ChatLLM::modelNameChangeRequested, Qt::QueuedConnection);
connect(this, &Chat::modelChangeRequested, m_llmodel, &ChatLLM::modelChangeRequested, Qt::QueuedConnection);
connect(this, &Chat::loadDefaultModelRequested, m_llmodel, &ChatLLM::loadDefaultModel, Qt::QueuedConnection);
connect(this, &Chat::loadModelRequested, m_llmodel, &ChatLLM::loadModel, Qt::QueuedConnection);
connect(this, &Chat::generateNameRequested, m_llmodel, &ChatLLM::generateName, Qt::QueuedConnection);
connect(this, &Chat::regenerateResponseRequested, m_llmodel, &ChatLLM::regenerateResponse, Qt::QueuedConnection);
connect(this, &Chat::resetResponseRequested, m_llmodel, &ChatLLM::resetResponse, Qt::QueuedConnection);
connect(this, &Chat::resetContextRequested, m_llmodel, &ChatLLM::resetContext, Qt::QueuedConnection);
emit defaultModelChanged(modelList().first());
}
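The connection-type arguments above are the load-bearing detail: Qt::DirectConnection runs the slot immediately on the emitting thread, while Qt::QueuedConnection posts the call to the receiver thread's event loop, which is what keeps the cross-thread ChatLLM signals safe. A minimal standalone sketch of the difference (hypothetical code, not part of this commit):

#include <QCoreApplication>
#include <QThread>
#include <QTimer>
#include <QDebug>

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);

    QThread worker;
    worker.start();                   // runs its own event loop

    QObject receiver;
    receiver.moveToThread(&worker);   // receiver now "lives" on the worker thread

    QTimer timer;                     // emits from the main thread
    timer.setSingleShot(true);

    // Direct: the functor runs immediately on the emitting (main) thread.
    QObject::connect(&timer, &QTimer::timeout, &receiver,
                     [] { qDebug() << "direct on" << QThread::currentThread(); },
                     Qt::DirectConnection);

    // Queued: the functor is posted to the receiver's (worker) event loop.
    QObject::connect(&timer, &QTimer::timeout, &receiver,
                     [] { qDebug() << "queued on" << QThread::currentThread(); },
                     Qt::QueuedConnection);

    timer.start(0);
    QTimer::singleShot(100, &app, &QCoreApplication::quit);
    const int rc = app.exec();
    worker.quit();
    worker.wait();
    return rc;
}

Running it prints two different thread pointers, one per connection type.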
void Chat::reset()
{
stopGenerating();
// Erase our current on-disk representation as we're completely resetting the chat along with its id
LLM::globalInstance()->chatListModel()->removeChatFile(this);
ChatListModel::globalInstance()->removeChatFile(this);
emit resetContextRequested();
m_id = Network::globalInstance()->generateUniqueId();
emit idChanged(m_id);
@ -251,21 +239,21 @@ void Chat::responseStopped()
Network::globalInstance()->sendChatStarted();
}
QString Chat::modelName() const
ModelInfo Chat::modelInfo() const
{
return m_modelName;
return m_modelInfo;
}
void Chat::setModelName(const QString &modelName)
void Chat::setModelInfo(const ModelInfo &modelInfo)
{
if (m_modelName == modelName)
if (m_modelInfo == modelInfo)
return;
m_modelLoadingError = QString();
emit modelLoadingErrorChanged();
m_modelName = modelName;
emit modelNameChanged();
emit modelNameChangeRequested(modelName);
m_modelInfo = modelInfo;
emit modelInfoChanged();
emit modelChangeRequested(modelInfo);
}
void Chat::newPromptResponsePair(const QString &prompt)
@ -292,17 +280,16 @@ bool Chat::isRecalc() const
void Chat::loadDefaultModel()
{
emit defaultModelChanged(modelList().first());
m_modelLoadingError = QString();
emit modelLoadingErrorChanged();
emit loadDefaultModelRequested();
}
void Chat::loadModel(const QString &modelName)
void Chat::loadModel(const ModelInfo &modelInfo)
{
m_modelLoadingError = QString();
emit modelLoadingErrorChanged();
emit loadModelRequested(modelName);
emit loadModelRequested(modelInfo);
}
void Chat::unloadAndDeleteLater()
@ -361,13 +348,22 @@ void Chat::handleDatabaseResultsChanged(const QList<ResultInfo> &results)
m_databaseResults = results;
}
void Chat::handleModelInfoChanged(const ModelInfo &modelInfo)
{
if (m_modelInfo == modelInfo)
return;
m_modelInfo = modelInfo;
emit modelInfoChanged();
}
bool Chat::serialize(QDataStream &stream, int version) const
{
stream << m_creationDate;
stream << m_id;
stream << m_name;
stream << m_userName;
stream << m_modelName;
stream << m_modelInfo.filename;
if (version > 2)
stream << m_collections;
if (!m_llmodel->serialize(stream, version))
@ -385,17 +381,23 @@ bool Chat::deserialize(QDataStream &stream, int version)
stream >> m_name;
stream >> m_userName;
emit nameChanged();
stream >> m_modelName;
emit modelNameChanged();
QString filename;
stream >> filename;
if (!ModelList::globalInstance()->contains(filename))
return false;
m_modelInfo = ModelList::globalInstance()->modelInfo(filename);
emit modelInfoChanged();
// Prior to version 2, gptj models had a bug that forced the kv_cache to F32 instead of F16, so
// unfortunately, we cannot deserialize these
if (version < 2 && m_modelName.contains("gpt4all-j"))
if (version < 2 && m_modelInfo.filename.contains("gpt4all-j"))
return false;
if (version > 2) {
stream >> m_collections;
emit collectionListChanged(m_collections);
}
m_llmodel->setModelName(m_modelName);
m_llmodel->setModelInfo(m_modelInfo);
if (!m_llmodel->deserialize(stream, version))
return false;
if (!m_chatModel->deserialize(stream, version))
@ -404,103 +406,6 @@ bool Chat::deserialize(QDataStream &stream, int version)
return stream.status() == QDataStream::Ok;
}
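The version checks above are the whole compatibility story: fields added in later format versions (like the collection list at version 3) are written and read only behind matching guards, so older .chat files still deserialize. A minimal sketch of the round-trip pattern with hypothetical fields:

#include <QDataStream>
#include <QStringList>
#include <QDebug>

int main()
{
    QByteArray bytes;
    const int version = 3;            // hypothetical format version

    {
        QDataStream out(&bytes, QIODeviceBase::WriteOnly);
        out << QString("my-chat");
        if (version > 2)              // field introduced in version 3
            out << QStringList{"docs"};
    }
    {
        QDataStream in(&bytes, QIODeviceBase::ReadOnly);
        QString name;
        in >> name;
        QStringList collections;
        if (version > 2)              // read only what this version wrote
            in >> collections;
        qDebug() << name << collections;
    }
    return 0;
}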
QList<QString> Chat::modelList() const
{
// Build a model list from exepath and from the localpath
QList<QString> list;
QString exePath = QCoreApplication::applicationDirPath() + QDir::separator();
QString localPath = Download::globalInstance()->downloadLocalModelsPath();
QSettings settings;
settings.sync();
// The user default model can be set by the user in the settings dialog. The "default" user
// default model is "Application default" which signals we should use the default model that was
// specified by the models.json file.
QString defaultModel = settings.value("userDefaultModel").toString();
if (defaultModel.isEmpty() || defaultModel == "Application default")
defaultModel = settings.value("defaultModel").toString();
QString currentModelName = modelName().isEmpty() ? defaultModel : modelName();
{
QDir dir(exePath);
QStringList allFiles = dir.entryList(QDir::Files);
// All files that end with .bin and have 'ggml' somewhere in the name
QStringList fileNames;
for(const QString& filename : allFiles) {
if (filename.endsWith(".bin") && filename.contains("ggml")) {
fileNames.append(filename);
}
}
for (const QString& f : fileNames) {
QString filePath = exePath + f;
QFileInfo info(filePath);
QString basename = info.completeBaseName();
QString name = basename.startsWith("ggml-") ? basename.remove(0, 5) : basename;
if (info.exists()) {
if (name == currentModelName)
list.prepend(name);
else
list.append(name);
}
}
}
if (localPath != exePath) {
QDir dir(localPath);
QStringList allFiles = dir.entryList(QDir::Files);
QStringList fileNames;
for(const QString& filename : allFiles) {
if ((filename.endsWith(".bin") && filename.contains("ggml"))
|| (filename.endsWith(".txt") && filename.startsWith("chatgpt-"))) {
fileNames.append(filename);
}
}
for (const QString &f : fileNames) {
QString filePath = localPath + f;
QFileInfo info(filePath);
QString basename = info.completeBaseName();
QString name = basename.startsWith("ggml-") ? basename.remove(0, 5) : basename;
if (info.exists() && !list.contains(name)) { // don't allow duplicates
if (name == currentModelName)
list.prepend(name);
else
list.append(name);
}
}
}
if (list.isEmpty()) {
if (exePath != localPath) {
qWarning() << "ERROR: Could not find any applicable models in"
<< exePath << "nor" << localPath;
} else {
qWarning() << "ERROR: Could not find any applicable models in"
<< exePath;
}
return QList<QString>();
}
return list;
}
void Chat::handleModelListChanged()
{
emit modelListChanged();
emit defaultModelChanged(modelList().first());
}
void Chat::handleDownloadLocalModelsPathChanged()
{
emit modelListChanged();
emit defaultModelChanged(modelList().first());
m_watcher->addPath(Download::globalInstance()->downloadLocalModelsPath());
}
QList<QString> Chat::collectionList() const
{
return m_collections;

View File: chat.h

@ -17,10 +17,9 @@ class Chat : public QObject
Q_PROPERTY(ChatModel *chatModel READ chatModel NOTIFY chatModelChanged)
Q_PROPERTY(bool isModelLoaded READ isModelLoaded NOTIFY isModelLoadedChanged)
Q_PROPERTY(QString response READ response NOTIFY responseChanged)
Q_PROPERTY(QString modelName READ modelName WRITE setModelName NOTIFY modelNameChanged)
Q_PROPERTY(ModelInfo modelInfo READ modelInfo WRITE setModelInfo NOTIFY modelInfoChanged)
Q_PROPERTY(bool responseInProgress READ responseInProgress NOTIFY responseInProgressChanged)
Q_PROPERTY(bool isRecalc READ isRecalc NOTIFY recalcChanged)
Q_PROPERTY(QList<QString> modelList READ modelList NOTIFY modelListChanged)
Q_PROPERTY(bool isServer READ isServer NOTIFY isServerChanged)
Q_PROPERTY(QString responseState READ responseState NOTIFY responseStateChanged)
Q_PROPERTY(QList<QString> collectionList READ collectionList NOTIFY collectionListChanged)
@ -66,12 +65,12 @@ public:
QString response() const;
bool responseInProgress() const { return m_responseInProgress; }
QString responseState() const;
QString modelName() const;
void setModelName(const QString &modelName);
ModelInfo modelInfo() const;
void setModelInfo(const ModelInfo &modelInfo);
bool isRecalc() const;
void loadDefaultModel();
void loadModel(const QString &modelName);
void loadModel(const ModelInfo &modelInfo);
void unloadModel();
void reloadModel();
void unloadAndDeleteLater();
@ -79,8 +78,6 @@ public:
qint64 creationDate() const { return m_creationDate; }
bool serialize(QDataStream &stream, int version) const;
bool deserialize(QDataStream &stream, int version);
QList<QString> modelList() const;
bool isServer() const { return m_isServer; }
QList<QString> collectionList() const;
@ -111,18 +108,16 @@ Q_SIGNALS:
void regenerateResponseRequested();
void resetResponseRequested();
void resetContextRequested();
void modelNameChangeRequested(const QString &modelName);
void modelNameChanged();
void modelChangeRequested(const ModelInfo &modelInfo);
void modelInfoChanged();
void recalcChanged();
void loadDefaultModelRequested();
void loadModelRequested(const QString &modelName);
void loadModelRequested(const ModelInfo &modelInfo);
void generateNameRequested();
void modelListChanged();
void modelLoadingErrorChanged();
void isServerChanged();
void collectionListChanged(const QList<QString> &collectionList);
void tokenSpeedChanged();
void defaultModelChanged(const QString &defaultModel);
private Q_SLOTS:
void handleResponseChanged(const QString &response);
@ -134,15 +129,14 @@ private Q_SLOTS:
void handleModelLoadingError(const QString &error);
void handleTokenSpeedChanged(const QString &tokenSpeed);
void handleDatabaseResultsChanged(const QList<ResultInfo> &results);
void handleModelListChanged();
void handleDownloadLocalModelsPathChanged();
void handleModelInfoChanged(const ModelInfo &modelInfo);
private:
QString m_id;
QString m_name;
QString m_generatedName;
QString m_userName;
QString m_modelName;
ModelInfo m_modelInfo;
QString m_modelLoadingError;
QString m_tokenSpeed;
QString m_response;
@ -156,7 +150,6 @@ private:
bool m_isServer;
bool m_shouldDeleteLater;
bool m_isModelLoaded;
QFileSystemWatcher *m_watcher;
};
#endif // CHAT_H

View File: chatlistmodel.cpp

@ -1,5 +1,4 @@
#include "chatlistmodel.h"
#include "download.h"
#include "llm.h"
#include <QFile>
@ -8,8 +7,15 @@
#define CHAT_FORMAT_MAGIC 0xF5D553CC
#define CHAT_FORMAT_VERSION 4
ChatListModel::ChatListModel(QObject *parent)
: QAbstractListModel(parent)
class MyChatListModel: public ChatListModel { };
Q_GLOBAL_STATIC(MyChatListModel, chatListModelInstance)
ChatListModel *ChatListModel::globalInstance()
{
return chatListModelInstance();
}
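The Q_GLOBAL_STATIC above yields a lazily constructed, thread-safe singleton; the empty MyChatListModel subclass exists only so the macro can reach the private constructor through the friend declaration in the header below. The same pattern, sketched with a hypothetical class:

#include <QGlobalStatic>
#include <QObject>

class Registry : public QObject {
public:
    static Registry *globalInstance();
private:
    explicit Registry() : QObject(nullptr) {}  // not publicly constructible
    friend class MyRegistry;                   // lets the subclass call the ctor
};

class MyRegistry : public Registry { };
Q_GLOBAL_STATIC(MyRegistry, registryInstance)  // created on first use, thread-safe

Registry *Registry::globalInstance()
{
    return registryInstance();
}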
ChatListModel::ChatListModel()
: QAbstractListModel(nullptr)
, m_newChat(nullptr)
, m_dummyChat(nullptr)
, m_serverChat(nullptr)
@ -54,7 +60,7 @@ void ChatListModel::setShouldSaveChatGPTChats(bool b)
void ChatListModel::removeChatFile(Chat *chat) const
{
Q_ASSERT(chat != m_serverChat);
const QString savePath = Download::globalInstance()->downloadLocalModelsPath();
const QString savePath = ModelList::globalInstance()->localModelsPath();
QFile file(savePath + "/gpt4all-" + chat->id() + ".chat");
if (!file.exists())
return;
@ -72,12 +78,12 @@ ChatSaver::ChatSaver()
void ChatListModel::saveChats()
{
const QString savePath = Download::globalInstance()->downloadLocalModelsPath();
const QString savePath = ModelList::globalInstance()->localModelsPath();
QVector<Chat*> toSave;
for (Chat *chat : m_chats) {
if (chat == m_serverChat)
continue;
const bool isChatGPT = chat->modelName().startsWith("chatgpt-");
const bool isChatGPT = chat->modelInfo().isChatGPT;
if (!isChatGPT && !m_shouldSaveChats)
continue;
if (isChatGPT && !m_shouldSaveChatGPTChats)
@ -99,7 +105,7 @@ void ChatSaver::saveChats(const QVector<Chat *> &chats)
{
QElapsedTimer timer;
timer.start();
const QString savePath = Download::globalInstance()->downloadLocalModelsPath();
const QString savePath = ModelList::globalInstance()->localModelsPath();
for (Chat *chat : chats) {
QString fileName = "gpt4all-" + chat->id() + ".chat";
QFile file(savePath + "/" + fileName);
@ -162,7 +168,7 @@ void ChatsRestoreThread::run()
}
}
{
const QString savePath = Download::globalInstance()->downloadLocalModelsPath();
const QString savePath = ModelList::globalInstance()->localModelsPath();
QDir dir(savePath);
dir.setNameFilters(QStringList() << "gpt4all-*.chat");
QStringList fileNames = dir.entryList();

View File: chatlistmodel.h

@ -40,7 +40,7 @@ class ChatListModel : public QAbstractListModel
Q_PROPERTY(bool shouldSaveChatGPTChats READ shouldSaveChatGPTChats WRITE setShouldSaveChatGPTChats NOTIFY shouldSaveChatGPTChatsChanged)
public:
explicit ChatListModel(QObject *parent = nullptr);
static ChatListModel *globalInstance();
enum Roles {
IdRole = Qt::UserRole + 1,
@ -262,6 +262,11 @@ private:
Chat* m_serverChat;
Chat* m_currentChat;
QList<Chat*> m_chats;
private:
explicit ChatListModel();
~ChatListModel() {}
friend class MyChatListModel;
};
#endif // CHATITEMMODEL_H

View File: chatllm.cpp

@ -1,9 +1,9 @@
#include "chatllm.h"
#include "chat.h"
#include "download.h"
#include "chatgpt.h"
#include "modellist.h"
#include "network.h"
#include "../gpt4all-backend/llmodel.h"
#include "chatgpt.h"
#include <QCoreApplication>
#include <QDir>
@ -20,29 +20,6 @@
#define REPLIT_INTERNAL_STATE_VERSION 0
#define LLAMA_INTERNAL_STATE_VERSION 0
static QString modelFilePath(const QString &modelName, bool isChatGPT)
{
QVector<QString> possibleFilePaths;
if (isChatGPT)
possibleFilePaths << "/" + modelName + ".txt";
else {
possibleFilePaths << "/ggml-" + modelName + ".bin";
possibleFilePaths << "/" + modelName + ".bin";
}
for (const QString &modelFilename : possibleFilePaths) {
QString appPath = QCoreApplication::applicationDirPath() + modelFilename;
QFileInfo infoAppPath(appPath);
if (infoAppPath.exists())
return appPath;
QString downloadPath = Download::globalInstance()->downloadLocalModelsPath() + modelFilename;
QFileInfo infoLocalPath(downloadPath);
if (infoLocalPath.exists())
return downloadPath;
}
return QString();
}
class LLModelStore {
public:
static LLModelStore *globalInstance();
@ -102,7 +79,6 @@ ChatLLM::ChatLLM(Chat *parent, bool isServer)
connect(this, &ChatLLM::shouldBeLoadedChanged, this, &ChatLLM::handleShouldBeLoadedChanged,
Qt::QueuedConnection); // explicitly queued
connect(parent, &Chat::idChanged, this, &ChatLLM::handleChatIdChanged);
connect(parent, &Chat::defaultModelChanged, this, &ChatLLM::handleDefaultModelChanged);
connect(&m_llmThread, &QThread::started, this, &ChatLLM::handleThreadStarted);
// The following are blocking operations and will block the llm thread
@ -121,8 +97,8 @@ ChatLLM::~ChatLLM()
// The only time we should have a model loaded here is on shutdown
// as we explicitly unload the model in all other circumstances
if (isModelLoaded()) {
delete m_modelInfo.model;
m_modelInfo.model = nullptr;
delete m_llModelInfo.model;
m_llModelInfo.model = nullptr;
}
}
@ -135,14 +111,15 @@ void ChatLLM::handleThreadStarted()
bool ChatLLM::loadDefaultModel()
{
if (m_defaultModel.isEmpty()) {
emit modelLoadingError(QString("Could not find default model to load"));
ModelInfo defaultModel = ModelList::globalInstance()->defaultModelInfo();
if (defaultModel.filename.isEmpty()) {
emit modelLoadingError(QString("Could not find any model to load"));
return false;
}
return loadModel(m_defaultModel);
return loadModel(defaultModel);
}
bool ChatLLM::loadModel(const QString &modelName)
bool ChatLLM::loadModel(const ModelInfo &modelInfo)
{
// This is a complicated method because N different possible threads are interested in the outcome
// of this method. Why? Because we have a main/gui thread trying to monitor the state of N different
@ -153,11 +130,11 @@ bool ChatLLM::loadModel(const QString &modelName)
// to provide an overview of what we're doing here.
// We're already loaded with this model
if (isModelLoaded() && this->modelName() == modelName)
if (isModelLoaded() && this->modelInfo() == modelInfo)
return true;
bool isChatGPT = modelName.startsWith("chatgpt-");
QString filePath = modelFilePath(modelName, isChatGPT);
bool isChatGPT = modelInfo.isChatGPT;
QString filePath = modelInfo.dirpath + modelInfo.filename;
QFileInfo fileInfo(filePath);
// We have a live model, but it isn't the one we want
@ -165,36 +142,36 @@ bool ChatLLM::loadModel(const QString &modelName)
if (alreadyAcquired) {
resetContext();
#if defined(DEBUG_MODEL_LOADING)
qDebug() << "already acquired model deleted" << m_llmThread.objectName() << m_modelInfo.model;
qDebug() << "already acquired model deleted" << m_llmThread.objectName() << m_llModelInfo.model;
#endif
delete m_modelInfo.model;
m_modelInfo.model = nullptr;
delete m_llModelInfo.model;
m_llModelInfo.model = nullptr;
emit isModelLoadedChanged(false);
} else if (!m_isServer) {
// This is a blocking call that tries to retrieve the model we need from the model store.
// If it succeeds, then we just have to restore state. If the store has never had a model
// returned to it, then the modelInfo.model pointer should be null, which will happen on startup
m_modelInfo = LLModelStore::globalInstance()->acquireModel();
m_llModelInfo = LLModelStore::globalInstance()->acquireModel();
#if defined(DEBUG_MODEL_LOADING)
qDebug() << "acquired model from store" << m_llmThread.objectName() << m_modelInfo.model;
qDebug() << "acquired model from store" << m_llmThread.objectName() << m_llModelInfo3.model;
#endif
// At this point it is possible that while we were blocked waiting to acquire the model from the
// store, that our state was changed to not be loaded. If this is the case, release the model
// back into the store and quit loading
if (!m_shouldBeLoaded) {
#if defined(DEBUG_MODEL_LOADING)
qDebug() << "no longer need model" << m_llmThread.objectName() << m_modelInfo.model;
qDebug() << "no longer need model" << m_llmThread.objectName() << m_llModelInfo.model;
#endif
LLModelStore::globalInstance()->releaseModel(m_modelInfo);
m_modelInfo = LLModelInfo();
LLModelStore::globalInstance()->releaseModel(m_llModelInfo);
m_llModelInfo = LLModelInfo();
emit isModelLoadedChanged(false);
return false;
}
// Check if the store just gave us exactly the model we were looking for
if (m_modelInfo.model && m_modelInfo.fileInfo == fileInfo) {
if (m_llModelInfo.model && m_llModelInfo.fileInfo == fileInfo) {
#if defined(DEBUG_MODEL_LOADING)
qDebug() << "store had our model" << m_llmThread.objectName() << m_modelInfo.model;
qDebug() << "store had our model" << m_llmThread.objectName() << m_llModelInfo.model;
#endif
restoreState();
emit isModelLoadedChanged(true);
@ -202,18 +179,18 @@ bool ChatLLM::loadModel(const QString &modelName)
} else {
// Release the memory since we have to switch to a different model.
#if defined(DEBUG_MODEL_LOADING)
qDebug() << "deleting model" << m_llmThread.objectName() << m_modelInfo.model;
qDebug() << "deleting model" << m_llmThread.objectName() << m_llModelInfo.model;
#endif
delete m_modelInfo.model;
m_modelInfo.model = nullptr;
delete m_llModelInfo.model;
m_llModelInfo.model = nullptr;
}
}
// Guarantee we've released the previous model's memory
Q_ASSERT(!m_modelInfo.model);
Q_ASSERT(!m_llModelInfo.model);
// Store the file info in the modelInfo in case we have an error loading
m_modelInfo.fileInfo = fileInfo;
m_llModelInfo.fileInfo = fileInfo;
if (fileInfo.exists()) {
if (isChatGPT) {
@ -226,46 +203,46 @@ bool ChatLLM::loadModel(const QString &modelName)
apiKey = stream.readAll();
file.close();
}
m_modelType = LLModelType::CHATGPT_;
m_llModelType = LLModelType::CHATGPT_;
ChatGPT *model = new ChatGPT();
model->setModelName(chatGPTModel);
model->setAPIKey(apiKey);
m_modelInfo.model = model;
m_llModelInfo.model = model;
} else {
m_modelInfo.model = LLModel::construct(filePath.toStdString());
if (m_modelInfo.model) {
bool success = m_modelInfo.model->loadModel(filePath.toStdString());
m_llModelInfo.model = LLModel::construct(filePath.toStdString());
if (m_llModelInfo.model) {
bool success = m_llModelInfo.model->loadModel(filePath.toStdString());
if (!success) {
delete std::exchange(m_modelInfo.model, nullptr);
delete std::exchange(m_llModelInfo.model, nullptr);
if (!m_isServer)
LLModelStore::globalInstance()->releaseModel(m_modelInfo); // release back into the store
m_modelInfo = LLModelInfo();
emit modelLoadingError(QString("Could not load model due to invalid model file for %1").arg(modelName));
LLModelStore::globalInstance()->releaseModel(m_llModelInfo); // release back into the store
m_llModelInfo = LLModelInfo();
emit modelLoadingError(QString("Could not load model due to invalid model file for %1").arg(modelInfo.filename));
} else {
switch (m_modelInfo.model->implementation().modelType[0]) {
case 'L': m_modelType = LLModelType::LLAMA_; break;
case 'G': m_modelType = LLModelType::GPTJ_; break;
case 'M': m_modelType = LLModelType::MPT_; break;
case 'R': m_modelType = LLModelType::REPLIT_; break;
switch (m_llModelInfo.model->implementation().modelType[0]) {
case 'L': m_llModelType = LLModelType::LLAMA_; break;
case 'G': m_llModelType = LLModelType::GPTJ_; break;
case 'M': m_llModelType = LLModelType::MPT_; break;
case 'R': m_llModelType = LLModelType::REPLIT_; break;
default:
{
delete std::exchange(m_modelInfo.model, nullptr);
delete std::exchange(m_llModelInfo.model, nullptr);
if (!m_isServer)
LLModelStore::globalInstance()->releaseModel(m_modelInfo); // release back into the store
m_modelInfo = LLModelInfo();
emit modelLoadingError(QString("Could not determine model type for %1").arg(modelName));
LLModelStore::globalInstance()->releaseModel(m_llModelInfo); // release back into the store
m_llModelInfo = LLModelInfo();
emit modelLoadingError(QString("Could not determine model type for %1").arg(modelInfo.filename));
}
}
}
} else {
if (!m_isServer)
LLModelStore::globalInstance()->releaseModel(m_modelInfo); // release back into the store
m_modelInfo = LLModelInfo();
emit modelLoadingError(QString("Could not load model due to invalid format for %1").arg(modelName));
LLModelStore::globalInstance()->releaseModel(m_llModelInfo); // release back into the store
m_llModelInfo = LLModelInfo();
emit modelLoadingError(QString("Could not load model due to invalid format for %1").arg(modelInfo.filename));
}
}
#if defined(DEBUG_MODEL_LOADING)
qDebug() << "new model" << m_llmThread.objectName() << m_modelInfo.model;
qDebug() << "new model" << m_llmThread.objectName() << m_llModelInfo.model;
#endif
restoreState();
#if defined(DEBUG)
@ -282,31 +259,27 @@ bool ChatLLM::loadModel(const QString &modelName)
emit sendModelLoaded();
} else {
if (!m_isServer)
LLModelStore::globalInstance()->releaseModel(m_modelInfo); // release back into the store
m_modelInfo = LLModelInfo();
emit modelLoadingError(QString("Could not find file for model %1").arg(modelName));
LLModelStore::globalInstance()->releaseModel(m_llModelInfo); // release back into the store
m_llModelInfo = LLModelInfo();
emit modelLoadingError(QString("Could not find file for model %1").arg(modelInfo.filename));
}
if (m_modelInfo.model) {
QString basename = fileInfo.completeBaseName();
if (basename.startsWith("ggml-")) // remove the ggml- prefix
basename.remove(0, 5);
setModelName(basename);
}
if (m_llModelInfo.model)
setModelInfo(modelInfo);
return m_modelInfo.model;
return m_llModelInfo.model;
}
bool ChatLLM::isModelLoaded() const
{
return m_modelInfo.model && m_modelInfo.model->isModelLoaded();
return m_llModelInfo.model && m_llModelInfo.model->isModelLoaded();
}
void ChatLLM::regenerateResponse()
{
// ChatGPT uses a different semantic meaning for n_past than local models. For ChatGPT, n_past
// counts the number of prompt/response pairs rather than total tokens.
if (m_modelType == LLModelType::CHATGPT_)
if (m_llModelType == LLModelType::CHATGPT_)
m_ctx.n_past -= 1;
else
m_ctx.n_past -= m_promptResponseTokens;
@ -357,20 +330,20 @@ QString ChatLLM::response() const
return QString::fromStdString(remove_leading_whitespace(m_response));
}
QString ChatLLM::modelName() const
ModelInfo ChatLLM::modelInfo() const
{
return m_modelName;
return m_modelInfo;
}
void ChatLLM::setModelName(const QString &modelName)
void ChatLLM::setModelInfo(const ModelInfo &modelInfo)
{
m_modelName = modelName;
emit modelNameChanged();
m_modelInfo = modelInfo;
emit modelInfoChanged(modelInfo);
}
void ChatLLM::modelNameChangeRequested(const QString &modelName)
void ChatLLM::modelChangeRequested(const ModelInfo &modelInfo)
{
loadModel(modelName);
loadModel(modelInfo);
}
bool ChatLLM::handlePrompt(int32_t token)
@ -454,13 +427,13 @@ bool ChatLLM::prompt(const QList<QString> &collectionList, const QString &prompt
m_ctx.n_batch = n_batch;
m_ctx.repeat_penalty = repeat_penalty;
m_ctx.repeat_last_n = repeat_penalty_tokens;
m_modelInfo.model->setThreadCount(n_threads);
m_llModelInfo.model->setThreadCount(n_threads);
#if defined(DEBUG)
printf("%s", qPrintable(instructPrompt));
fflush(stdout);
#endif
m_timer->start();
m_modelInfo.model->prompt(instructPrompt.toStdString(), promptFunc, responseFunc, recalcFunc, m_ctx);
m_llModelInfo.model->prompt(instructPrompt.toStdString(), promptFunc, responseFunc, recalcFunc, m_ctx);
#if defined(DEBUG)
printf("\n");
fflush(stdout);
@ -478,7 +451,7 @@ bool ChatLLM::prompt(const QList<QString> &collectionList, const QString &prompt
void ChatLLM::setShouldBeLoaded(bool b)
{
#if defined(DEBUG_MODEL_LOADING)
qDebug() << "setShouldBeLoaded" << m_llmThread.objectName() << b << m_modelInfo.model;
qDebug() << "setShouldBeLoaded" << m_llmThread.objectName() << b << m_llModelInfo.model;
#endif
m_shouldBeLoaded = b; // atomic
emit shouldBeLoadedChanged();
@ -505,10 +478,10 @@ void ChatLLM::unloadModel()
saveState();
#if defined(DEBUG_MODEL_LOADING)
qDebug() << "unloadModel" << m_llmThread.objectName() << m_modelInfo.model;
qDebug() << "unloadModel" << m_llmThread.objectName() << m_llModelInfo.model;
#endif
LLModelStore::globalInstance()->releaseModel(m_modelInfo);
m_modelInfo = LLModelInfo();
LLModelStore::globalInstance()->releaseModel(m_llModelInfo);
m_llModelInfo = LLModelInfo();
emit isModelLoadedChanged(false);
}
@ -518,10 +491,10 @@ void ChatLLM::reloadModel()
return;
#if defined(DEBUG_MODEL_LOADING)
qDebug() << "reloadModel" << m_llmThread.objectName() << m_modelInfo.model;
qDebug() << "reloadModel" << m_llmThread.objectName() << m_llModelInfo.model;
#endif
const QString m = modelName();
if (m.isEmpty())
const ModelInfo m = modelInfo();
if (m.name.isEmpty())
loadDefaultModel();
else
loadModel(m);
@ -545,7 +518,7 @@ void ChatLLM::generateName()
printf("%s", qPrintable(instructPrompt));
fflush(stdout);
#endif
m_modelInfo.model->prompt(instructPrompt.toStdString(), promptFunc, responseFunc, recalcFunc, ctx);
m_llModelInfo.model->prompt(instructPrompt.toStdString(), promptFunc, responseFunc, recalcFunc, ctx);
#if defined(DEBUG)
printf("\n");
fflush(stdout);
@ -562,11 +535,6 @@ void ChatLLM::handleChatIdChanged(const QString &id)
m_llmThread.setObjectName(id);
}
void ChatLLM::handleDefaultModelChanged(const QString &defaultModel)
{
m_defaultModel = defaultModel;
}
bool ChatLLM::handleNamePrompt(int32_t token)
{
Q_UNUSED(token);
@ -595,8 +563,8 @@ bool ChatLLM::handleNameRecalculate(bool isRecalc)
bool ChatLLM::serialize(QDataStream &stream, int version)
{
if (version > 1) {
stream << m_modelType;
switch (m_modelType) {
stream << m_llModelType;
switch (m_llModelType) {
case REPLIT_: stream << REPLIT_INTERNAL_STATE_VERSION; break;
case MPT_: stream << MPT_INTERNAL_STATE_VERSION; break;
case GPTJ_: stream << GPTJ_INTERNAL_STATE_VERSION; break;
@ -629,7 +597,7 @@ bool ChatLLM::deserialize(QDataStream &stream, int version)
{
if (version > 1) {
int internalStateVersion;
stream >> m_modelType;
stream >> m_llModelType;
stream >> internalStateVersion; // for future use
}
QString response;
@ -670,21 +638,21 @@ void ChatLLM::saveState()
if (!isModelLoaded())
return;
if (m_modelType == LLModelType::CHATGPT_) {
if (m_llModelType == LLModelType::CHATGPT_) {
m_state.clear();
QDataStream stream(&m_state, QIODeviceBase::WriteOnly);
stream.setVersion(QDataStream::Qt_6_5);
ChatGPT *chatGPT = static_cast<ChatGPT*>(m_modelInfo.model);
ChatGPT *chatGPT = static_cast<ChatGPT*>(m_llModelInfo.model);
stream << chatGPT->context();
return;
}
const size_t stateSize = m_modelInfo.model->stateSize();
const size_t stateSize = m_llModelInfo.model->stateSize();
m_state.resize(stateSize);
#if defined(DEBUG)
qDebug() << "saveState" << m_llmThread.objectName() << "size:" << m_state.size();
#endif
m_modelInfo.model->saveState(static_cast<uint8_t*>(reinterpret_cast<void*>(m_state.data())));
m_llModelInfo.model->saveState(static_cast<uint8_t*>(reinterpret_cast<void*>(m_state.data())));
}
void ChatLLM::restoreState()
@ -692,10 +660,10 @@ void ChatLLM::restoreState()
if (!isModelLoaded() || m_state.isEmpty())
return;
if (m_modelType == LLModelType::CHATGPT_) {
if (m_llModelType == LLModelType::CHATGPT_) {
QDataStream stream(&m_state, QIODeviceBase::ReadOnly);
stream.setVersion(QDataStream::Qt_6_5);
ChatGPT *chatGPT = static_cast<ChatGPT*>(m_modelInfo.model);
ChatGPT *chatGPT = static_cast<ChatGPT*>(m_llModelInfo.model);
QList<QString> context;
stream >> context;
chatGPT->setContext(context);
@ -707,7 +675,7 @@ void ChatLLM::restoreState()
#if defined(DEBUG)
qDebug() << "restoreState" << m_llmThread.objectName() << "size:" << m_state.size();
#endif
m_modelInfo.model->restoreState(static_cast<const uint8_t*>(reinterpret_cast<void*>(m_state.data())));
m_llModelInfo.model->restoreState(static_cast<const uint8_t*>(reinterpret_cast<void*>(m_state.data())));
m_state.clear();
m_state.resize(0);
}
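LLModelStore::acquireModel() is described in the comments above as a blocking call whose result carries a null model pointer the first time through, before anything has been released back. Its implementation is not shown in this diff; one plausible shape under that assumption, using QMutex and QWaitCondition (hypothetical StoredModel type):

#include <QMutex>
#include <QWaitCondition>

struct StoredModel { /* model pointer, file info, ... */ };

class BlockingStore {
public:
    StoredModel acquireModel() {          // blocks until the slot is occupied
        QMutexLocker locker(&m_mutex);
        while (!m_available)
            m_condition.wait(&m_mutex);
        m_available = false;
        return m_slot;
    }
    void releaseModel(const StoredModel &model) {
        QMutexLocker locker(&m_mutex);
        m_slot = model;
        m_available = true;
        m_condition.wakeAll();            // wake blocked acquirers
    }
private:
    QMutex m_mutex;
    QWaitCondition m_condition;
    StoredModel m_slot;                   // starts empty: first acquire sees a null model
    bool m_available = true;
};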

View File: chatllm.h

@ -6,6 +6,7 @@
#include <QFileInfo>
#include "localdocs.h"
#include "modellist.h"
#include "../gpt4all-backend/llmodel.h"
enum LLModelType {
@ -67,12 +68,7 @@ class Chat;
class ChatLLM : public QObject
{
Q_OBJECT
Q_PROPERTY(bool isModelLoaded READ isModelLoaded NOTIFY isModelLoadedChanged)
Q_PROPERTY(QString response READ response NOTIFY responseChanged)
Q_PROPERTY(QString modelName READ modelName WRITE setModelName NOTIFY modelNameChanged)
Q_PROPERTY(bool isRecalc READ isRecalc NOTIFY recalcChanged)
Q_PROPERTY(QString generatedName READ generatedName NOTIFY generatedNameChanged)
public:
ChatLLM(Chat *parent, bool isServer = false);
virtual ~ChatLLM();
@ -88,9 +84,9 @@ public:
void setShouldBeLoaded(bool b);
QString response() const;
QString modelName() const;
void setModelName(const QString &modelName);
ModelInfo modelInfo() const;
void setModelInfo(const ModelInfo &info);
bool isRecalc() const { return m_isRecalc; }
@ -104,25 +100,23 @@ public Q_SLOTS:
int32_t n_predict, int32_t top_k, float top_p, float temp, int32_t n_batch, float repeat_penalty,
int32_t repeat_penalty_tokens, int32_t n_threads);
bool loadDefaultModel();
bool loadModel(const QString &modelName);
void modelNameChangeRequested(const QString &modelName);
bool loadModel(const ModelInfo &modelInfo);
void modelChangeRequested(const ModelInfo &modelInfo);
void forceUnloadModel();
void unloadModel();
void reloadModel();
void generateName();
void handleChatIdChanged(const QString &id);
void handleDefaultModelChanged(const QString &defaultModel);
void handleShouldBeLoadedChanged();
void handleThreadStarted();
Q_SIGNALS:
void recalcChanged();
void isModelLoadedChanged(bool);
void modelLoadingError(const QString &error);
void responseChanged(const QString &response);
void promptProcessing();
void responseStopped();
void modelNameChanged();
void recalcChanged();
void sendStartup();
void sendModelLoaded();
void generatedNameChanged(const QString &name);
@ -132,6 +126,7 @@ Q_SIGNALS:
void requestRetrieveFromDB(const QList<QString> &collections, const QString &text, int retrievalSize, QList<ResultInfo> *results);
void reportSpeed(const QString &speed);
void databaseResultsChanged(const QList<ResultInfo>&);
void modelInfoChanged(const ModelInfo &modelInfo);
protected:
bool handlePrompt(int32_t token);
@ -151,10 +146,9 @@ protected:
private:
std::string m_response;
std::string m_nameResponse;
LLModelInfo m_modelInfo;
LLModelType m_modelType;
QString m_modelName;
QString m_defaultModel;
LLModelInfo m_llModelInfo;
LLModelType m_llModelType;
ModelInfo m_modelInfo;
TokenTimer *m_timer;
QByteArray m_state;
QThread m_llmThread;

View File: database.cpp

@ -1,5 +1,5 @@
#include "database.h"
#include "download.h"
#include "modellist.h"
#include <QTimer>
#include <QPdfDocument>
@ -415,7 +415,7 @@ bool selectDocuments(QSqlQuery &q, int folder_id, QList<int> *documentIds) {
QSqlError initDb()
{
QString dbPath = Download::globalInstance()->downloadLocalModelsPath()
QString dbPath = ModelList::globalInstance()->localModelsPath()
+ QString("localdocs_v%1.db").arg(LOCALDOCS_VERSION);
QSqlDatabase db = QSqlDatabase::addDatabase("QSQLITE");
db.setDatabaseName(dbPath);

View File: download.cpp

@ -1,5 +1,6 @@
#include "download.h"
#include "network.h"
#include "modellist.h"
#include <QCoreApplication>
#include <QNetworkRequest>
@ -29,13 +30,9 @@ Download::Download()
&Download::handleHashAndSaveFinished, Qt::QueuedConnection);
connect(&m_networkManager, &QNetworkAccessManager::sslErrors, this,
&Download::handleSslErrors);
connect(this, &Download::downloadLocalModelsPathChanged, this, &Download::updateModelList);
connect(ModelList::globalInstance(), &ModelList::localModelsPathChanged, this, &Download::updateModelList);
updateModelList();
updateReleaseNotes();
QSettings settings;
settings.sync();
m_downloadLocalModelsPath = settings.value("modelPath",
defaultLocalModelsPath()).toString();
m_startTime = QDateTime::currentDateTime();
}
@ -65,47 +62,6 @@ bool compareVersions(const QString &a, const QString &b) {
return aParts.size() > bParts.size();
}
QList<ModelInfo> Download::modelList() const
{
// We make sure the default model is listed first
QList<ModelInfo> values = m_modelMap.values();
ModelInfo defaultInfo;
ModelInfo bestGPTJInfo;
ModelInfo bestLlamaInfo;
ModelInfo bestMPTInfo;
QList<ModelInfo> filtered;
for (const ModelInfo &v : values) {
if (v.isDefault)
defaultInfo = v;
if (v.bestGPTJ)
bestGPTJInfo = v;
if (v.bestLlama)
bestLlamaInfo = v;
if (v.bestMPT)
bestMPTInfo = v;
filtered.append(v);
}
Q_ASSERT(defaultInfo == bestGPTJInfo || defaultInfo == bestLlamaInfo || defaultInfo == bestMPTInfo);
if (bestLlamaInfo.bestLlama) {
filtered.removeAll(bestLlamaInfo);
filtered.prepend(bestLlamaInfo);
}
if (bestGPTJInfo.bestGPTJ) {
filtered.removeAll(bestGPTJInfo);
filtered.prepend(bestGPTJInfo);
}
if (bestMPTInfo.bestMPT) {
filtered.removeAll(bestMPTInfo);
filtered.prepend(bestMPTInfo);
}
return filtered;
}
ReleaseInfo Download::releaseInfo() const
{
const QString currentVersion = QCoreApplication::applicationVersion();
@ -124,20 +80,6 @@ bool Download::hasNewerRelease() const
return compareVersions(versions.first(), currentVersion);
}
QString Download::downloadLocalModelsPath() const {
return m_downloadLocalModelsPath;
}
void Download::setDownloadLocalModelsPath(const QString &modelPath) {
QString filePath = (modelPath.startsWith("file://") ?
QUrl(modelPath).toLocalFile() : modelPath);
QString canonical = QFileInfo(filePath).canonicalFilePath() + "/";
if (m_downloadLocalModelsPath != canonical) {
m_downloadLocalModelsPath = canonical;
emit downloadLocalModelsPathChanged();
}
}
bool Download::isFirstStart() const
{
QSettings settings;
@ -149,42 +91,10 @@ bool Download::isFirstStart() const
return first;
}
QString Download::incompleteDownloadPath(const QString &modelFile) {
QString downloadPath = downloadLocalModelsPath() + "incomplete-" +
modelFile;
return downloadPath;
}
QString Download::defaultLocalModelsPath() const
{
QString localPath = QStandardPaths::writableLocation(QStandardPaths::AppLocalDataLocation)
+ "/";
QString testWritePath = localPath + QString("test_write.txt");
QString canonicalLocalPath = QFileInfo(localPath).canonicalFilePath() + "/";
QDir localDir(localPath);
if (!localDir.exists()) {
if (!localDir.mkpath(localPath)) {
qWarning() << "ERROR: Local download directory can't be created:" << canonicalLocalPath;
return canonicalLocalPath;
}
}
if (QFileInfo::exists(testWritePath))
return canonicalLocalPath;
QFile testWriteFile(testWritePath);
if (testWriteFile.open(QIODeviceBase::ReadWrite)) {
testWriteFile.close();
return canonicalLocalPath;
}
qWarning() << "ERROR: Local download path appears not writeable:" << canonicalLocalPath;
return canonicalLocalPath;
}
void Download::updateModelList()
{
QUrl jsonUrl("http://gpt4all.io/models/models.json");
// QUrl jsonUrl("http://gpt4all.io/models/models.json");
QUrl jsonUrl("file:///home/atreat/dev/large_language_models/gpt4all/gpt4all-chat/metadata/models.json");
QNetworkRequest request(jsonUrl);
QSslConfiguration conf = request.sslConfiguration();
conf.setPeerVerifyMode(QSslSocket::VerifyNone);
@ -206,34 +116,40 @@ void Download::updateReleaseNotes()
void Download::downloadModel(const QString &modelFile)
{
QFile *tempFile = new QFile(incompleteDownloadPath(modelFile));
QFile *tempFile = new QFile(ModelList::globalInstance()->incompleteDownloadPath(modelFile));
QDateTime modTime = tempFile->fileTime(QFile::FileModificationTime);
bool success = tempFile->open(QIODevice::WriteOnly | QIODevice::Append);
qWarning() << "Opening temp file for writing:" << tempFile->fileName();
if (!success) {
qWarning() << "ERROR: Could not open temp file:"
<< tempFile->fileName() << modelFile;
const QString error
= QString("ERROR: Could not open temp file: %1 %2").arg(tempFile->fileName()).arg(modelFile);
qWarning() << error;
ModelList::globalInstance()->updateData(modelFile, ModelList::DownloadErrorRole, error);
return;
}
tempFile->flush();
size_t incomplete_size = tempFile->size();
if (incomplete_size > 0) {
if (modTime < m_startTime) {
qWarning() << "File last modified before app started, rewinding by 1MB";
if (incomplete_size >= 1024 * 1024) {
incomplete_size -= 1024 * 1024;
} else {
incomplete_size = 0;
}
bool success = tempFile->seek(incomplete_size);
if (!success) {
incomplete_size = 0;
success = tempFile->seek(incomplete_size);
Q_ASSERT(success);
}
tempFile->seek(incomplete_size);
}
ModelInfo info = m_modelMap.value(modelFile);
if (!ModelList::globalInstance()->contains(modelFile)) {
qWarning() << "ERROR: Could not find file:" << modelFile;
return;
}
ModelList::globalInstance()->updateData(modelFile, ModelList::DownloadingRole, true);
ModelInfo info = ModelList::globalInstance()->modelInfo(modelFile);
QString url = !info.url.isEmpty() ? info.url : "http://gpt4all.io/models/" + modelFile;
Network::globalInstance()->sendDownloadStarted(modelFile);
QNetworkRequest request(url);
request.setAttribute(QNetworkRequest::User, modelFile);
request.setRawHeader("range", QString("bytes=%1-").arg(incomplete_size).toUtf8());
request.setRawHeader("range", QString("bytes=%1-").arg(tempFile->pos()).toUtf8());
QSslConfiguration conf = request.sslConfiguration();
conf.setPeerVerifyMode(QSslSocket::VerifyNone);
request.setSslConfiguration(conf);
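The resume logic works by appending to the incomplete temp file and asking the server for only the remaining bytes via the range header; a server that supports ranges answers 206 Partial Content from that offset. A reduced standalone sketch (hypothetical URL and file name, error handling omitted):

#include <QCoreApplication>
#include <QNetworkAccessManager>
#include <QNetworkRequest>
#include <QNetworkReply>
#include <QFile>
#include <QUrl>

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);
    QNetworkAccessManager manager;

    QFile tempFile("incomplete-model.bin");          // hypothetical partial download
    tempFile.open(QIODevice::WriteOnly | QIODevice::Append);
    tempFile.seek(tempFile.size());                  // continue after existing bytes

    QNetworkRequest request(QUrl("http://example.com/model.bin")); // hypothetical URL
    // "bytes=N-" requests everything from offset N to the end of the resource.
    request.setRawHeader("range", QString("bytes=%1-").arg(tempFile.pos()).toUtf8());

    QNetworkReply *reply = manager.get(request);
    QObject::connect(reply, &QNetworkReply::readyRead, [&] {
        tempFile.write(reply->readAll());            // append the new bytes
    });
    QObject::connect(reply, &QNetworkReply::finished, &app, &QCoreApplication::quit);
    return app.exec();
}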
@ -263,8 +179,7 @@ void Download::cancelDownload(const QString &modelFile)
tempFile->deleteLater();
m_activeDownloads.remove(modelReply);
// Emit downloadFinished signal for cleanup
emit downloadFinished(modelFile);
ModelList::globalInstance()->updateData(modelFile, ModelList::DownloadingRole, false);
break;
}
}
@ -277,37 +192,34 @@ void Download::installModel(const QString &modelFile, const QString &apiKey)
return;
Network::globalInstance()->sendInstallModel(modelFile);
QString filePath = downloadLocalModelsPath() + modelFile + ".txt";
QString filePath = ModelList::globalInstance()->localModelsPath() + modelFile + ".txt";
QFile file(filePath);
if (file.open(QIODeviceBase::WriteOnly | QIODeviceBase::Text)) {
QTextStream stream(&file);
stream << apiKey;
file.close();
ModelInfo info = m_modelMap.value(modelFile);
info.installed = true;
m_modelMap.insert(modelFile, info);
emit modelListChanged();
}
}
void Download::removeModel(const QString &modelFile)
{
const bool isChatGPT = modelFile.startsWith("chatgpt-");
const QString filePath = downloadLocalModelsPath()
+ modelFile
+ (isChatGPT ? ".txt" : QString());
QFile file(filePath);
if (!file.exists()) {
qWarning() << "ERROR: Cannot remove file that does not exist" << filePath;
return;
const QString filePath = ModelList::globalInstance()->localModelsPath() + modelFile;
QFile incompleteFile(ModelList::globalInstance()->incompleteDownloadPath(modelFile));
if (incompleteFile.exists()) {
incompleteFile.remove();
}
Network::globalInstance()->sendRemoveModel(modelFile);
ModelInfo info = m_modelMap.value(modelFile);
info.installed = false;
m_modelMap.insert(modelFile, info);
file.remove();
emit modelListChanged();
QFile file(filePath);
if (file.exists()) {
Network::globalInstance()->sendRemoveModel(modelFile);
file.remove();
}
ModelList::globalInstance()->updateData(modelFile, ModelList::BytesReceivedRole, 0);
ModelList::globalInstance()->updateData(modelFile, ModelList::BytesTotalRole, 0);
ModelList::globalInstance()->updateData(modelFile, ModelList::TimestampRole, 0);
ModelList::globalInstance()->updateData(modelFile, ModelList::SpeedRole, QString());
ModelList::globalInstance()->updateData(modelFile, ModelList::DownloadErrorRole, QString());
}
void Download::handleSslErrors(QNetworkReply *reply, const QList<QSslError> &errors)
@ -319,37 +231,12 @@ void Download::handleSslErrors(QNetworkReply *reply, const QList<QSslError> &err
void Download::handleModelsJsonDownloadFinished()
{
#if 0
QByteArray jsonData = QString(""
"["
" {"
" \"md5sum\": \"61d48a82cb188cceb14ebb8082bfec37\","
" \"filename\": \"ggml-gpt4all-j-v1.1-breezy.bin\","
" \"filesize\": \"3785248281\""
" },"
" {"
" \"md5sum\": \"879344aaa9d62fdccbda0be7a09e7976\","
" \"filename\": \"ggml-gpt4all-j-v1.2-jazzy.bin\","
" \"filesize\": \"3785248281\","
" \"isDefault\": \"true\""
" },"
" {"
" \"md5sum\": \"5b5a3f9b858d33b29b52b89692415595\","
" \"filesize\": \"3785248281\","
" \"filename\": \"ggml-gpt4all-j.bin\""
" }"
"]"
).toUtf8();
printf("%s\n", jsonData.toStdString().c_str());
fflush(stdout);
#else
QNetworkReply *jsonReply = qobject_cast<QNetworkReply *>(sender());
if (!jsonReply)
return;
QByteArray jsonData = jsonReply->readAll();
jsonReply->deleteLater();
#endif
parseModelsJsonFile(jsonData);
}
@ -358,18 +245,17 @@ void Download::parseModelsJsonFile(const QByteArray &jsonData)
QJsonParseError err;
QJsonDocument document = QJsonDocument::fromJson(jsonData, &err);
if (err.error != QJsonParseError::NoError) {
qDebug() << "ERROR: Couldn't parse: " << jsonData << err.errorString();
qWarning() << "ERROR: Couldn't parse: " << jsonData << err.errorString();
return;
}
QString defaultModel;
QJsonArray jsonArray = document.array();
const QString currentVersion = QCoreApplication::applicationVersion();
m_modelMap.clear();
for (const QJsonValue &value : jsonArray) {
QJsonObject obj = value.toObject();
QString modelName = obj["name"].toString();
QString modelFilename = obj["filename"].toString();
QString modelFilesize = obj["filesize"].toString();
QString requiresVersion = obj["requires"].toString();
@ -377,10 +263,13 @@ void Download::parseModelsJsonFile(const QByteArray &jsonData)
QString url = obj["url"].toString();
QByteArray modelMd5sum = obj["md5sum"].toString().toLatin1().constData();
bool isDefault = obj.contains("isDefault") && obj["isDefault"] == QString("true");
bool bestGPTJ = obj.contains("bestGPTJ") && obj["bestGPTJ"] == QString("true");
bool bestLlama = obj.contains("bestLlama") && obj["bestLlama"] == QString("true");
bool bestMPT = obj.contains("bestMPT") && obj["bestMPT"] == QString("true");
bool disableGUI = obj.contains("disableGUI") && obj["disableGUI"] == QString("true");
QString description = obj["description"].toString();
QString order = obj["order"].toString();
int ramrequired = obj["ramrequired"].toString().toInt();
QString parameters = obj["parameters"].toString();
QString quant = obj["quant"].toString();
QString type = obj["type"].toString();
// If the currentVersion version is strictly less than required version, then continue
if (!requiresVersion.isEmpty()
@ -395,77 +284,66 @@ void Download::parseModelsJsonFile(const QByteArray &jsonData)
continue;
}
if (isDefault)
defaultModel = modelFilename;
quint64 sz = modelFilesize.toULongLong();
if (sz < 1024) {
modelFilesize = QString("%1 bytes").arg(sz);
} else if (sz < 1024 * 1024) {
modelFilesize = QString("%1 KB").arg(qreal(sz) / 1024, 0, 'g', 3);
} else if (sz < 1024 * 1024 * 1024) {
modelFilesize = QString("%1 MB").arg(qreal(sz) / (1024 * 1024), 0, 'g', 3);
} else {
modelFilesize = QString("%1 GB").arg(qreal(sz) / (1024 * 1024 * 1024), 0, 'g', 3);
}
modelFilesize = ModelList::toFileSize(modelFilesize.toULongLong());
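ModelList::toFileSize replaces the inline size formatting deleted just above; judging from that removed code, its behavior is presumably equivalent to:

#include <QString>

QString toFileSize(quint64 sz)  // presumed equivalent of ModelList::toFileSize
{
    if (sz < 1024)
        return QString("%1 bytes").arg(sz);
    if (sz < 1024 * 1024)
        return QString("%1 KB").arg(qreal(sz) / 1024, 0, 'g', 3);
    if (sz < 1024 * 1024 * 1024)
        return QString("%1 MB").arg(qreal(sz) / (1024 * 1024), 0, 'g', 3);
    return QString("%1 GB").arg(qreal(sz) / (1024 * 1024 * 1024), 0, 'g', 3);
}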
QString filePath = downloadLocalModelsPath() + modelFilename;
QFileInfo info(filePath);
ModelInfo modelInfo;
modelInfo.filename = modelFilename;
modelInfo.filesize = modelFilesize;
modelInfo.md5sum = modelMd5sum;
modelInfo.installed = info.exists();
modelInfo.isDefault = isDefault;
modelInfo.bestGPTJ = bestGPTJ;
modelInfo.bestLlama = bestLlama;
modelInfo.bestMPT = bestMPT;
modelInfo.description = description;
modelInfo.requiresVersion = requiresVersion;
modelInfo.deprecatedVersion = deprecatedVersion;
modelInfo.url = url;
m_modelMap.insert(modelInfo.filename, modelInfo);
if (!ModelList::globalInstance()->contains(modelFilename))
ModelList::globalInstance()->addModel(modelFilename);
if (!modelName.isEmpty())
ModelList::globalInstance()->updateData(modelFilename, ModelList::NameRole, modelName);
ModelList::globalInstance()->updateData(modelFilename, ModelList::FilesizeRole, modelFilesize);
ModelList::globalInstance()->updateData(modelFilename, ModelList::Md5sumRole, modelMd5sum);
ModelList::globalInstance()->updateData(modelFilename, ModelList::DefaultRole, isDefault);
ModelList::globalInstance()->updateData(modelFilename, ModelList::DescriptionRole, description);
ModelList::globalInstance()->updateData(modelFilename, ModelList::RequiresVersionRole, requiresVersion);
ModelList::globalInstance()->updateData(modelFilename, ModelList::DeprecatedVersionRole, deprecatedVersion);
ModelList::globalInstance()->updateData(modelFilename, ModelList::UrlRole, url);
ModelList::globalInstance()->updateData(modelFilename, ModelList::DisableGUIRole, disableGUI);
ModelList::globalInstance()->updateData(modelFilename, ModelList::OrderRole, order);
ModelList::globalInstance()->updateData(modelFilename, ModelList::RamrequiredRole, ramrequired);
ModelList::globalInstance()->updateData(modelFilename, ModelList::ParametersRole, parameters);
ModelList::globalInstance()->updateData(modelFilename, ModelList::QuantRole, quant);
ModelList::globalInstance()->updateData(modelFilename, ModelList::TypeRole, type);
}
const QString chatGPTDesc = tr("WARNING: requires personal OpenAI API key and usage of this "
"model will send your chats over the network to OpenAI. Your API key will be stored on disk "
"and only used to interact with OpenAI models. If you don't have one, you can apply for "
"an API key <a href=\"https://platform.openai.com/account/api-keys\">here.</a>");
const QString chatGPTDesc = tr("<ul><li>Requires personal OpenAI API key.</li><li>WARNING: Will send"
" your chats to OpenAI!</li><li>Your API key will be stored on disk</li><li>Will only be used"
" to communicate with OpenAI</li><li>You can apply for an API key"
" <a href=\"https://platform.openai.com/account/api-keys\">here.</a></li>");
{
ModelInfo modelInfo;
modelInfo.isChatGPT = true;
modelInfo.filename = "chatgpt-gpt-3.5-turbo";
modelInfo.description = tr("OpenAI's ChatGPT model gpt-3.5-turbo. ") + chatGPTDesc;
modelInfo.requiresVersion = "2.4.2";
QString filePath = downloadLocalModelsPath() + modelInfo.filename + ".txt";
QFileInfo info(filePath);
modelInfo.installed = info.exists();
m_modelMap.insert(modelInfo.filename, modelInfo);
const QString modelFilename = "chatgpt-gpt-3.5-turbo.txt";
if (!ModelList::globalInstance()->contains(modelFilename))
ModelList::globalInstance()->addModel(modelFilename);
ModelList::globalInstance()->updateData(modelFilename, ModelList::NameRole, "ChatGPT-3.5 Turbo");
ModelList::globalInstance()->updateData(modelFilename, ModelList::FilesizeRole, "minimal");
ModelList::globalInstance()->updateData(modelFilename, ModelList::ChatGPTRole, true);
ModelList::globalInstance()->updateData(modelFilename, ModelList::DescriptionRole,
tr("<strong>OpenAI's ChatGPT model GPT-3.5 Turbo</strong><br>") + chatGPTDesc);
ModelList::globalInstance()->updateData(modelFilename, ModelList::RequiresVersionRole, "2.4.2");
ModelList::globalInstance()->updateData(modelFilename, ModelList::OrderRole, "ca");
ModelList::globalInstance()->updateData(modelFilename, ModelList::RamrequiredRole, 0);
ModelList::globalInstance()->updateData(modelFilename, ModelList::ParametersRole, "?");
ModelList::globalInstance()->updateData(modelFilename, ModelList::QuantRole, "NA");
ModelList::globalInstance()->updateData(modelFilename, ModelList::TypeRole, "GPT");
}
{
ModelInfo modelInfo;
modelInfo.isChatGPT = true;
modelInfo.filename = "chatgpt-gpt-4";
modelInfo.description = tr("OpenAI's ChatGPT model gpt-4. ") + chatGPTDesc;
modelInfo.requiresVersion = "2.4.2";
QString filePath = downloadLocalModelsPath() + modelInfo.filename + ".txt";
QFileInfo info(filePath);
modelInfo.installed = info.exists();
m_modelMap.insert(modelInfo.filename, modelInfo);
const QString modelFilename = "chatgpt-gpt-4.txt";
if (!ModelList::globalInstance()->contains(modelFilename))
ModelList::globalInstance()->addModel(modelFilename);
ModelList::globalInstance()->updateData(modelFilename, ModelList::NameRole, "ChatGPT-4");
ModelList::globalInstance()->updateData(modelFilename, ModelList::FilesizeRole, "minimal");
ModelList::globalInstance()->updateData(modelFilename, ModelList::ChatGPTRole, true);
ModelList::globalInstance()->updateData(modelFilename, ModelList::DescriptionRole,
tr("<strong>OpenAI's ChatGPT model GPT-4</strong><br>") + chatGPTDesc);
ModelList::globalInstance()->updateData(modelFilename, ModelList::RequiresVersionRole, "2.4.2");
ModelList::globalInstance()->updateData(modelFilename, ModelList::OrderRole, "cb");
ModelList::globalInstance()->updateData(modelFilename, ModelList::RamrequiredRole, 0);
ModelList::globalInstance()->updateData(modelFilename, ModelList::ParametersRole, "?");
ModelList::globalInstance()->updateData(modelFilename, ModelList::QuantRole, "NA");
ModelList::globalInstance()->updateData(modelFilename, ModelList::TypeRole, "GPT");
}
// remove ggml- prefix and .bin suffix
if (defaultModel.startsWith("ggml-"))
defaultModel = defaultModel.remove(0, 5);
if (defaultModel.endsWith(".bin"))
defaultModel.chop(4);
QSettings settings;
settings.sync();
settings.setValue("defaultModel", defaultModel);
settings.sync();
emit modelListChanged();
}
void Download::handleReleaseJsonDownloadFinished()
@ -484,7 +362,7 @@ void Download::parseReleaseJsonFile(const QByteArray &jsonData)
QJsonParseError err;
QJsonDocument document = QJsonDocument::fromJson(jsonData, &err);
if (err.error != QJsonParseError::NoError) {
qDebug() << "ERROR: Couldn't parse: " << jsonData << err.errorString();
qWarning() << "ERROR: Couldn't parse: " << jsonData << err.errorString();
return;
}
@ -515,10 +393,13 @@ void Download::handleErrorOccurred(QNetworkReply::NetworkError code)
return;
QString modelFilename = modelReply->request().attribute(QNetworkRequest::User).toString();
qWarning() << "ERROR: Network error occurred attempting to download"
<< modelFilename
<< "code:" << code
<< "errorString" << modelReply->errorString();
const QString error
= QString("ERROR: Network error occurred attempting to download %1 code: %2 errorString %3")
.arg(modelFilename)
.arg(code)
.arg(modelReply->errorString());
qWarning() << error;
ModelList::globalInstance()->updateData(modelFilename, ModelList::DownloadErrorRole, error);
Network::globalInstance()->sendDownloadError(modelFilename, (int)code, modelReply->errorString());
cancelDownload(modelFilename);
}
@ -537,8 +418,30 @@ void Download::handleDownloadProgress(qint64 bytesReceived, qint64 bytesTotal)
bytesTotal = contentTotalSize.toLongLong();
}
QString modelFilename = modelReply->request().attribute(QNetworkRequest::User).toString();
emit downloadProgress(tempFile->pos(), bytesTotal, modelFilename);
const QString modelFilename = modelReply->request().attribute(QNetworkRequest::User).toString();
const qint64 lastUpdate = ModelList::globalInstance()->data(modelFilename, ModelList::TimestampRole).toLongLong();
const qint64 currentUpdate = QDateTime::currentMSecsSinceEpoch();
if (currentUpdate - lastUpdate < 1000)
return;
const qint64 lastBytesReceived = ModelList::globalInstance()->data(modelFilename, ModelList::BytesReceivedRole).toLongLong();
const qint64 currentBytesReceived = tempFile->pos();
qint64 timeDifference = currentUpdate - lastUpdate;
qint64 bytesDifference = currentBytesReceived - lastBytesReceived;
qint64 speed = (bytesDifference / timeDifference) * 1000; // bytes per second
QString speedText;
if (speed < 1024)
speedText = QString::number(static_cast<double>(speed), 'f', 2) + " B/s";
else if (speed < 1024 * 1024)
speedText = QString::number(static_cast<double>(speed / 1024.0), 'f', 2) + " KB/s";
else
speedText = QString::number(static_cast<double>(speed / (1024.0 * 1024.0)), 'f', 2) + " MB/s";
ModelList::globalInstance()->updateData(modelFilename, ModelList::BytesReceivedRole, currentBytesReceived);
ModelList::globalInstance()->updateData(modelFilename, ModelList::BytesTotalRole, bytesTotal);
ModelList::globalInstance()->updateData(modelFilename, ModelList::SpeedRole, speedText);
ModelList::globalInstance()->updateData(modelFilename, ModelList::TimestampRole, currentUpdate);
}
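The handler above throttles model updates to one per second and derives speed from the byte and time deltas; the arithmetic is bytes per millisecond scaled by 1000, and the integer division truncates slightly. A worked example with hypothetical numbers:

#include <QDebug>

int main()
{
    const qint64 lastUpdate = 0, currentUpdate = 2000;              // 2000 ms apart
    const qint64 lastBytesReceived = 0;
    const qint64 currentBytesReceived = 5 * 1024 * 1024;            // 5 MiB received

    const qint64 timeDifference = currentUpdate - lastUpdate;       // 2000 ms
    const qint64 bytesDifference = currentBytesReceived - lastBytesReceived;
    const qint64 speed = (bytesDifference / timeDifference) * 1000; // 2621 * 1000

    qDebug() << speed << "B/s";   // 2621000, formatted as "2.50 MB/s" above
    return 0;
}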
HashAndSaveFile::HashAndSaveFile()
@ -557,9 +460,10 @@ void HashAndSaveFile::hashAndSave(const QString &expectedHash, const QString &sa
// Reopen the tempFile for hashing
if (!tempFile->open(QIODevice::ReadOnly)) {
qWarning() << "ERROR: Could not open temp file for hashing:"
<< tempFile->fileName() << modelFilename;
emit hashAndSaveFinished(false, tempFile, modelReply);
const QString error
= QString("ERROR: Could not open temp file for hashing: %1 %2").arg(tempFile->fileName()).arg(modelFilename);
qWarning() << error;
emit hashAndSaveFinished(false, error, tempFile, modelReply);
return;
}
@ -568,11 +472,11 @@ void HashAndSaveFile::hashAndSave(const QString &expectedHash, const QString &sa
hash.addData(tempFile->read(16384));
if (hash.result().toHex() != expectedHash) {
tempFile->close();
qWarning() << "ERROR: Download error MD5SUM did not match:"
<< hash.result().toHex()
<< "!=" << expectedHash << "for" << modelFilename;
const QString error
= QString("ERROR: Download error MD5SUM did not match: %1 != %2 for %3").arg(hash.result().toHex()).arg(expectedHash).arg(modelFilename);
qWarning() << error;
tempFile->remove();
emit hashAndSaveFinished(false, tempFile, modelReply);
emit hashAndSaveFinished(false, error, tempFile, modelReply);
return;
}
@ -582,15 +486,16 @@ void HashAndSaveFile::hashAndSave(const QString &expectedHash, const QString &sa
// Attempt to *move* the verified tempfile into place - this should be atomic
// but will only work if the destination is on the same filesystem
if (tempFile->rename(saveFilePath)) {
emit hashAndSaveFinished(true, tempFile, modelReply);
emit hashAndSaveFinished(true, QString(), tempFile, modelReply);
return;
}
// Reopen the tempFile for copying
if (!tempFile->open(QIODevice::ReadOnly)) {
qWarning() << "ERROR: Could not open temp file at finish:"
<< tempFile->fileName() << modelFilename;
emit hashAndSaveFinished(false, tempFile, modelReply);
const QString error
= QString("ERROR: Could not open temp file at finish: %1 %2").arg(tempFile->fileName()).arg(modelFilename);
qWarning() << error;
emit hashAndSaveFinished(false, error, tempFile, modelReply);
return;
}
@ -604,14 +509,14 @@ void HashAndSaveFile::hashAndSave(const QString &expectedHash, const QString &sa
}
file.close();
tempFile->close();
emit hashAndSaveFinished(true, tempFile, modelReply);
emit hashAndSaveFinished(true, QString(), tempFile, modelReply);
} else {
QFile::FileError error = file.error();
qWarning() << "ERROR: Could not save model to location:"
<< saveFilePath
<< "failed with code" << error;
const QString errorString
= QString("ERROR: Could not save model to location: %1 failed with code %2").arg(saveFilePath).arg(error);
qWarning() << errorString;
tempFile->close();
emit hashAndSaveFinished(false, tempFile, modelReply);
emit hashAndSaveFinished(false, errorString, tempFile, modelReply);
return;
}
}
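The save step above relies on QFile::rename being an atomic move when the temp file and destination share a filesystem; the 16 KiB-chunk copy is only the cross-filesystem fallback. A minimal standalone sketch of the same pattern (paths and function name hypothetical):

#include <QFile>

bool moveIntoPlace(const QString &tempPath, const QString &destPath)
{
    QFile temp(tempPath);
    if (temp.rename(destPath)) // atomic when both paths share a filesystem
        return true;
    // The cross-filesystem rename fails, so fall back to a byte-for-byte copy
    if (!temp.open(QIODevice::ReadOnly))
        return false;
    QFile dest(destPath);
    if (!dest.open(QIODevice::WriteOnly)) {
        temp.close();
        return false;
    }
    while (!temp.atEnd())
        dest.write(temp.read(16384));
    dest.close();
    temp.close();
    return true;
}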
@ -630,40 +535,44 @@ void Download::handleModelDownloadFinished()
qWarning() << "ERROR: downloading:" << modelReply->errorString();
modelReply->deleteLater();
tempFile->deleteLater();
emit downloadFinished(modelFilename);
ModelList::globalInstance()->updateData(modelFilename, ModelList::DownloadingRole, false);
ModelList::globalInstance()->updateData(modelFilename, ModelList::DownloadErrorRole, modelReply->errorString());
return;
}
// The hash and save needs the tempFile closed
tempFile->close();
// Notify that we are calculating hash
ModelInfo info = m_modelMap.value(modelFilename);
info.calcHash = true;
m_modelMap.insert(modelFilename, info);
emit modelListChanged();
if (!ModelList::globalInstance()->contains(modelFilename)) {
qWarning() << "ERROR: downloading no such file:" << modelFilename;
modelReply->deleteLater();
tempFile->deleteLater();
return;
}
const QString saveFilePath = downloadLocalModelsPath() + modelFilename;
emit requestHashAndSave(info.md5sum, saveFilePath, tempFile, modelReply);
// Notify that we are calculating hash
ModelList::globalInstance()->updateData(modelFilename, ModelList::CalcHashRole, true);
QByteArray md5sum = ModelList::globalInstance()->modelInfo(modelFilename).md5sum;
const QString saveFilePath = ModelList::globalInstance()->localModelsPath() + modelFilename;
emit requestHashAndSave(md5sum, saveFilePath, tempFile, modelReply);
}
void Download::handleHashAndSaveFinished(bool success,
void Download::handleHashAndSaveFinished(bool success, const QString &error,
QFile *tempFile, QNetworkReply *modelReply)
{
// The hash and save should hand the tempFile back closed
Q_ASSERT(!tempFile->isOpen());
QString modelFilename = modelReply->request().attribute(QNetworkRequest::User).toString();
Network::globalInstance()->sendDownloadFinished(modelFilename, success);
ModelInfo info = m_modelMap.value(modelFilename);
info.calcHash = false;
info.installed = success;
m_modelMap.insert(modelFilename, info);
emit modelListChanged();
ModelList::globalInstance()->updateData(modelFilename, ModelList::CalcHashRole, false);
modelReply->deleteLater();
tempFile->deleteLater();
emit downloadFinished(modelFilename);
ModelList::globalInstance()->updateData(modelFilename, ModelList::DownloadingRole, false);
if (!success)
ModelList::globalInstance()->updateData(modelFilename, ModelList::DownloadErrorRole, error);
else
ModelList::globalInstance()->updateData(modelFilename, ModelList::DownloadErrorRole, QString());
}
void Download::handleReadyRead()

View File

@ -9,41 +9,6 @@
#include <QTemporaryFile>
#include <QThread>
struct ModelInfo {
Q_GADGET
Q_PROPERTY(QString filename MEMBER filename)
Q_PROPERTY(QString filesize MEMBER filesize)
Q_PROPERTY(QByteArray md5sum MEMBER md5sum)
Q_PROPERTY(bool calcHash MEMBER calcHash)
Q_PROPERTY(bool installed MEMBER installed)
Q_PROPERTY(bool isDefault MEMBER isDefault)
Q_PROPERTY(bool bestGPTJ MEMBER bestGPTJ)
Q_PROPERTY(bool bestLlama MEMBER bestLlama)
Q_PROPERTY(bool bestMPT MEMBER bestMPT)
Q_PROPERTY(bool isChatGPT MEMBER isChatGPT)
Q_PROPERTY(QString description MEMBER description)
Q_PROPERTY(QString requiresVersion MEMBER requiresVersion)
Q_PROPERTY(QString deprecatedVersion MEMBER deprecatedVersion)
Q_PROPERTY(QString url MEMBER url)
public:
QString filename;
QString filesize;
QByteArray md5sum;
bool calcHash = false;
bool installed = false;
bool isDefault = false;
bool bestGPTJ = false;
bool bestLlama = false;
bool bestMPT = false;
bool isChatGPT = false;
QString description;
QString requiresVersion;
QString deprecatedVersion;
QString url;
};
Q_DECLARE_METATYPE(ModelInfo)
struct ReleaseInfo {
Q_GADGET
Q_PROPERTY(QString version MEMBER version)
@ -67,7 +32,7 @@ public Q_SLOTS:
QFile *tempFile, QNetworkReply *modelReply);
Q_SIGNALS:
void hashAndSaveFinished(bool success,
void hashAndSaveFinished(bool success, const QString &error,
QFile *tempFile, QNetworkReply *modelReply);
private:
@ -77,30 +42,24 @@ private:
class Download : public QObject
{
Q_OBJECT
Q_PROPERTY(QList<ModelInfo> modelList READ modelList NOTIFY modelListChanged)
Q_PROPERTY(bool hasNewerRelease READ hasNewerRelease NOTIFY hasNewerReleaseChanged)
Q_PROPERTY(ReleaseInfo releaseInfo READ releaseInfo NOTIFY releaseInfoChanged)
Q_PROPERTY(QString downloadLocalModelsPath READ downloadLocalModelsPath
WRITE setDownloadLocalModelsPath
NOTIFY downloadLocalModelsPathChanged)
public:
static Download *globalInstance();
QList<ModelInfo> modelList() const;
ReleaseInfo releaseInfo() const;
bool hasNewerRelease() const;
Q_INVOKABLE void updateModelList();
Q_INVOKABLE void updateReleaseNotes();
Q_INVOKABLE void downloadModel(const QString &modelFile);
Q_INVOKABLE void cancelDownload(const QString &modelFile);
Q_INVOKABLE void installModel(const QString &modelFile, const QString &apiKey);
Q_INVOKABLE void removeModel(const QString &modelFile);
Q_INVOKABLE QString defaultLocalModelsPath() const;
Q_INVOKABLE QString downloadLocalModelsPath() const;
Q_INVOKABLE void setDownloadLocalModelsPath(const QString &modelPath);
Q_INVOKABLE bool isFirstStart() const;
public Q_SLOTS:
void updateModelList();
void updateReleaseNotes();
private Q_SLOTS:
void handleSslErrors(QNetworkReply *reply, const QList<QSslError> &errors);
void handleModelsJsonDownloadFinished();
@ -108,17 +67,13 @@ private Q_SLOTS:
void handleErrorOccurred(QNetworkReply::NetworkError code);
void handleDownloadProgress(qint64 bytesReceived, qint64 bytesTotal);
void handleModelDownloadFinished();
void handleHashAndSaveFinished(bool success,
void handleHashAndSaveFinished(bool success, const QString &error,
QFile *tempFile, QNetworkReply *modelReply);
void handleReadyRead();
Q_SIGNALS:
void downloadProgress(qint64 bytesReceived, qint64 bytesTotal, const QString &modelFile);
void downloadFinished(const QString &modelFile);
void modelListChanged();
void releaseInfoChanged();
void hasNewerReleaseChanged();
void downloadLocalModelsPathChanged();
void requestHashAndSave(const QString &hash, const QString &saveFilePath,
QFile *tempFile, QNetworkReply *modelReply);
@ -128,11 +83,9 @@ private:
QString incompleteDownloadPath(const QString &modelFile);
HashAndSaveFile *m_hashAndSave;
QMap<QString, ModelInfo> m_modelMap;
QMap<QString, ReleaseInfo> m_releaseMap;
QNetworkAccessManager m_networkManager;
QMap<QNetworkReply*, QFile*> m_activeDownloads;
QString m_downloadLocalModelsPath;
QDateTime m_startTime;
private:

View File

@ -1,6 +1,8 @@
#include "llm.h"
#include "config.h"
#include "download.h"
#include "sysinfo.h"
#include "chatlistmodel.h"
#include "../gpt4all-backend/llmodel.h"
#include "network.h"
#include <QCoreApplication>
@ -20,7 +22,6 @@ LLM *LLM::globalInstance()
LLM::LLM()
: QObject{nullptr}
, m_chatListModel(new ChatListModel(this))
, m_threadCount(std::min(4, (int32_t) std::thread::hardware_concurrency()))
, m_serverEnabled(false)
, m_compatHardware(true)
@ -39,7 +40,7 @@ LLM::LLM()
#endif
LLModel::setImplementationsSearchPath(llmodelSearchPaths.toStdString());
connect(this, &LLM::serverEnabledChanged,
m_chatListModel, &ChatListModel::handleServerEnabledChanged);
ChatListModel::globalInstance(), &ChatListModel::handleServerEnabledChanged);
#if defined(__x86_64__)
#ifndef _MSC_VER
@ -95,6 +96,16 @@ bool LLM::fileExists(const QString &path) const
return info.exists() && info.isFile();
}
qint64 LLM::systemTotalRAMInGB() const
{
return getSystemTotalRAMInGB();
}
QString LLM::systemTotalRAMInGBString() const
{
return QString::fromStdString(getSystemTotalRAMInGBString());
}
int32_t LLM::threadCount() const
{
return m_threadCount;

View File

@ -3,12 +3,9 @@
#include <QObject>
#include "chatlistmodel.h"
class LLM : public QObject
{
Q_OBJECT
Q_PROPERTY(ChatListModel *chatListModel READ chatListModel NOTIFY chatListModelChanged)
Q_PROPERTY(int32_t threadCount READ threadCount WRITE setThreadCount NOTIFY threadCountChanged)
Q_PROPERTY(bool serverEnabled READ serverEnabled WRITE setServerEnabled NOTIFY serverEnabledChanged)
Q_PROPERTY(bool compatHardware READ compatHardware NOTIFY compatHardwareChanged)
@ -16,7 +13,6 @@ class LLM : public QObject
public:
static LLM *globalInstance();
ChatListModel *chatListModel() const { return m_chatListModel; }
int32_t threadCount() const;
void setThreadCount(int32_t n_threads);
bool serverEnabled() const;
@ -27,15 +23,17 @@ public:
Q_INVOKABLE bool checkForUpdates() const;
Q_INVOKABLE bool directoryExists(const QString &path) const;
Q_INVOKABLE bool fileExists(const QString &path) const;
Q_INVOKABLE qint64 systemTotalRAMInGB() const;
Q_INVOKABLE QString systemTotalRAMInGBString() const;
Q_SIGNALS:
void chatListModelChanged();
void modelListChanged();
void threadCountChanged();
void serverEnabledChanged();
void compatHardwareChanged();
private:
ChatListModel *m_chatListModel;
int32_t m_threadCount;
bool m_serverEnabled;
bool m_compatHardware;

View File

@ -6,6 +6,8 @@
#include <QSettings>
#include "llm.h"
#include "modellist.h"
#include "chatlistmodel.h"
#include "localdocs.h"
#include "download.h"
#include "network.h"
@ -24,6 +26,8 @@ int main(int argc, char *argv[])
QGuiApplication app(argc, argv);
QQmlApplicationEngine engine;
qmlRegisterSingletonInstance("modellist", 1, 0, "ModelList", ModelList::globalInstance());
qmlRegisterSingletonInstance("chatlistmodel", 1, 0, "ChatListModel", ChatListModel::globalInstance());
qmlRegisterSingletonInstance("llm", 1, 0, "LLM", LLM::globalInstance());
qmlRegisterSingletonInstance("download", 1, 0, "Download", Download::globalInstance());
qmlRegisterSingletonInstance("network", 1, 0, "Network", Network::globalInstance());

View File

@ -5,7 +5,9 @@ import QtQuick.Controls.Basic
import QtQuick.Layouts
import Qt5Compat.GraphicalEffects
import llm
import chatlistmodel
import download
import modellist
import network
import gpt4all
@ -22,7 +24,7 @@ Window {
id: theme
}
property var currentChat: LLM.chatListModel.currentChat
property var currentChat: ChatListModel.currentChat
property var chatModel: currentChat.chatModel
property bool hasSaved: false
@ -31,12 +33,12 @@ Window {
return;
savingPopup.open();
LLM.chatListModel.saveChats();
ChatListModel.saveChats();
close.accepted = false
}
Connections {
target: LLM.chatListModel
target: ChatListModel
function onSaveChatsFinished() {
window.hasSaved = true;
savingPopup.close();
@ -96,7 +98,7 @@ Window {
}
// check for any current models and if not, open download dialog
if (currentChat.modelList.length === 0 && !firstStartDialog.opened) {
if (ModelList.count === 0 && !firstStartDialog.opened) {
downloadNewModels.open();
return;
}
@ -144,7 +146,17 @@ Window {
id: modelLoadingErrorPopup
anchors.centerIn: parent
shouldTimeOut: false
text: currentChat.modelLoadingError
text: qsTr("<h3>Encountered an error loading model:</h3><br>")
+ "<i>\"" + currentChat.modelLoadingError + "\"</i>"
+ qsTr("<br><br>Model loading failures can happen for a variety of reasons, but the most common "
+ "causes include a bad file format, an incomplete or corrupted download, the wrong file "
+ "type or an incompatible model type. Here are some suggestions for resolving the problem:"
+ "<br><ul>"
+ "<li>Ensure the model file has a compatible ggml format and type"
+ "<li>Check the model file is complete in the download folder"
+ "<li>You can find the download folder in the settings dialog"
+ "<li>If you've sideloaded the model ensure the file is not corrupt by checking md5sum"
+ "<li>Check out our <a href=\"https://discord.gg/4M2QFmTt2k\">discord channel</a> for help")
}
Rectangle {
@ -180,12 +192,22 @@ Window {
anchors.horizontalCenter: parent.horizontalCenter
anchors.horizontalCenterOffset: window.width >= 950 ? 0 : Math.max(-((950 - window.width) / 2), -99.5)
enabled: !currentChat.isServer
model: currentChat.modelList
model: ModelList.installedModels
valueRole: "filename"
textRole: "name"
Connections {
target: currentChat
function onModelInfoChanged() {
comboBox.currentIndex = comboBox.indexOfValue(currentChat.modelInfo.filename)
}
}
contentItem: Text {
anchors.horizontalCenter: parent.horizontalCenter
leftPadding: 10
rightPadding: 20
text: currentChat.modelLoadingError !== "" ? "Model loading error..." : comboBox.displayText
text: currentChat.modelLoadingError !== "" ? qsTr("Model loading error...")
: (comboBox.textAt(comboBox.currentIndex) !== "" ? comboBox.textAt(comboBox.currentIndex)
: comboBox.valueAt(comboBox.currentIndex))
font: comboBox.font
color: theme.textColor
verticalAlignment: Text.AlignVCenter
@ -195,7 +217,7 @@ Window {
delegate: ItemDelegate {
width: comboBox.width
contentItem: Text {
text: modelData
text: name !== "" ? name : filename
color: theme.textColor
font: comboBox.font
elide: Text.ElideRight
@ -212,7 +234,7 @@ Window {
onActivated: {
currentChat.stopGenerating()
currentChat.reset();
currentChat.modelName = comboBox.currentText
currentChat.modelInfo = ModelList.modelInfo(comboBox.valueAt(comboBox.currentIndex))
}
}
}
@ -552,7 +574,7 @@ Window {
id: downloadNewModels
anchors.centerIn: parent
width: Math.min(1024, window.width - (window.width * .2))
height: Math.min(600, window.height - (window.height * .2))
height: window.height - (window.height * .2)
Item {
Accessible.role: Accessible.Dialog
Accessible.name: qsTr("Download new models dialog")
@ -817,7 +839,7 @@ Window {
}
Image {
visible: currentChat.isServer || currentChat.modelName.startsWith("chatgpt-")
visible: currentChat.isServer || currentChat.modelInfo.isChatGPT
anchors.fill: parent
sourceSize.width: 1024
sourceSize.height: 1024

View File

@ -1,97 +1,262 @@
[
{
"md5sum": "81a09a0ddf89690372fc296ff7f625af",
"filename": "ggml-gpt4all-j-v1.3-groovy.bin",
"filesize": "3785248281",
"isDefault": "true",
"bestGPTJ": "true",
"description": "GPT-J 6B finetuned by Nomic AI on the latest GPT4All dataset.<br>- Licensed for commercial use.<br>- Fast responses."
},
{
"md5sum": "11d9f060ca24575a2c303bdc39952486",
"filename": "GPT4All-13B-snoozy.ggmlv3.q4_0.bin",
"filesize": "8136770688",
"requires": "2.4.7",
"isDefault": "true",
"bestLlama": "true",
"description": "LLaMA 13B finetuned by Nomic AI on the latest GPT4All dataset.<br>- Cannot be used commercially.<br>- Slower responses but higher quality.",
"url": "https://huggingface.co/TheBloke/GPT4All-13B-snoozy-GGML/resolve/main/GPT4All-13B-snoozy.ggmlv3.q4_0.bin"
},
{
"md5sum": "756249d3d6abe23bde3b1ae272628640",
"filename": "ggml-mpt-7b-chat.bin",
"filesize": "4854401050",
"isDefault": "true",
"bestMPT": "true",
"requires": "2.4.1",
"description": "MPT 7B chat model trained by Mosaic ML.<br>- Cannot be used commercially.<br>- Fast responses."
},
{
"order": "a",
"md5sum": "4acc146dd43eb02845c233c29289c7c5",
"name": "Hermes",
"filename": "nous-hermes-13b.ggmlv3.q4_0.bin",
"filesize": "8136777088",
"requires": "2.4.7",
"description": "LLaMa 13B finetuned on over 300,000 curated and uncensored instructions.<br>- Cannot be used commercially.<br>- Best finetuned LLaMA model.<br>- This model was fine-tuned by Nous Research, with Teknium and Karan4D leading the fine tuning process and dataset curation, Redmond AI sponsoring the compute, and several other contributors. The result is an enhanced Llama 13b model that rivals GPT-3.5-turbo in performance across a variety of tasks. This model stands out for its long responses, low hallucination rate, and absence of OpenAI censorship mechanisms.",
"ramrequired": "16",
"parameters": "13 billion",
"quant": "q4_0",
"type": "LLaMA",
"description": "
<strong>Best overall model</strong>
<br>
<ul>
<li>Instruction based
<li>Gives long responses
<li>Curated with 300,000 uncensored instructions
<li>Trained by Nous Research
<li>Cannot be used commercially
</ul>",
"url": "https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q4_0.bin"
},
{
"order": "b",
"md5sum": "756249d3d6abe23bde3b1ae272628640",
"name": "MPT Chat",
"filename": "ggml-mpt-7b-chat.bin",
"filesize": "4854401050",
"requires": "2.4.1",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "MPT",
"description": "
<strong>Best overall smaller model</strong>
<br>
<ul>
<li>Fast responses
<li>Chat based
<li>Trained by Mosaic ML
<li>Cannot be used commercially
</ul>"
},
{
"order": "c",
"md5sum": "81a09a0ddf89690372fc296ff7f625af",
"name": "Groovy",
"filename": "ggml-gpt4all-j-v1.3-groovy.bin",
"filesize": "3785248281",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "GPT-J",
"description": "
<strong>Best overall for commercial usage</strong>
<br>
<ul>
<li>Fast responses
<li>Creative responses</li>
<li>Instruction based</li>
<li>Trained by Nomic AI
<li>Licensed for commercial use
</ul>"
},
{
"order": "d",
"md5sum": "11d9f060ca24575a2c303bdc39952486",
"name": "Snoozy",
"filename": "GPT4All-13B-snoozy.ggmlv3.q4_0.bin",
"filesize": "8136770688",
"requires": "2.4.7",
"ramrequired": "16",
"parameters": "13 billion",
"quant": "q4_0",
"type": "LLaMA",
"description": "
<strong>Very good overall model</strong>
<br>
<ul>
<li>Instruction based
<li>Based on the same dataset as Groovy
<li>Slower than Groovy, with higher quality responses
<li>Trained by Nomic AI
<li>Cannot be used commercially
</ul>",
"url": "https://huggingface.co/TheBloke/GPT4All-13B-snoozy-GGML/resolve/main/GPT4All-13B-snoozy.ggmlv3.q4_0.bin"
},
{
"order": "e",
"md5sum": "29119f8fa11712704c6b22ac5ab792ea",
"name": "Vicuna",
"filename": "ggml-vicuna-7b-1.1-q4_2.bin",
"filesize": "4212859520",
"description": "LLaMA 7B finetuned by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.<br>- Cannot be used commercially."
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_2",
"type": "LLaMA",
"description": "
<strong>Good small model - trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego</strong>
<br>
<ul>
<li>Instruction based
<li>Cannot be used commercially
</ul>"
},
{
"order": "f",
"md5sum": "95999b7b0699e2070af63bf5d34101a8",
"name": "Vicuna (large)",
"filename": "ggml-vicuna-13b-1.1-q4_2.bin",
"filesize": "8136770688",
"description": "LLaMA 13B trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.<br>- Cannot be used commercially."
"ramrequired": "16",
"parameters": "13 billion",
"quant": "q4_2",
"type": "LLaMA",
"description": "
<strong>Good larger model - trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego</strong>
<br>
<ul>
<li>Instruction based
<li>Cannot be used commercially
</ul>"
},
{
"order": "g",
"md5sum": "99e6d129745a3f1fb1121abed747b05a",
"name": "Wizard",
"filename": "ggml-wizardLM-7B.q4_2.bin",
"filesize": "4212864640",
"description": "LLaMA 7B finetuned by Microsoft and Peking University.<br>- Cannot be used commercially."
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_2",
"type": "LLaMA",
"description": "
<strong>Good small model - trained by Microsoft and Peking University</strong>
<br>
<ul>
<li>Instruction based
<li>Cannot be used commercially
</ul>"
},
{
"order": "h",
"md5sum": "6cb4ee297537c9133bddab9692879de0",
"name": "Stable Vicuna",
"filename": "ggml-stable-vicuna-13B.q4_2.bin",
"filesize": "8136777088",
"description": "LLaMa 13B finetuned with RLHF by Stability AI.<br>- Cannot be used commercially."
},
{
"md5sum": "120c32a51d020066288df045ef5d52b9",
"filename": "ggml-mpt-7b-base.bin",
"filesize": "4854401028",
"requires": "2.4.1",
"description": "MPT 7B pre-trained by Mosaic ML. Trained for text completion with no assistant finetuning.<br>- Licensed for commercial use."
},
{
"md5sum": "d5eafd5b0bd0d615cfd5fd763f642dfe",
"filename": "ggml-nous-gpt4-vicuna-13b.bin",
"filesize": "8136777088",
"description": "LLaMa 13B fine-tuned on ~180,000 instructions by Nous Research.<br>- Cannot be used commercially."
"ramrequired": "16",
"parameters": "13 billion",
"quant": "q4_2",
"type": "LLaMA",
"description": "
<strong>Trained with RLHF by Stability AI</strong>
<br>
<ul>
<li>Instruction based
<li>Cannot be used commercially
</ul>"
},
{
"order": "i",
"md5sum": "1cfa4958f489f0a0d1ffdf6b37322809",
"name": "MPT Instruct",
"filename": "ggml-mpt-7b-instruct.bin",
"filesize": "4854401028",
"requires": "2.4.1",
"description": "MPT 7B instruction finetuned by Mosaic ML.<br>- Licensed for commercial use."
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "MPT",
"description": "
<strong>Mosaic's instruction model</strong>
<br>
<ul>
<li>Instruction based
<li>Trained by Mosaic ML
<li>Licensed for commercial use
</ul>"
},
{
"order": "j",
"md5sum": "120c32a51d020066288df045ef5d52b9",
"name": "MPT Base",
"filename": "ggml-mpt-7b-base.bin",
"filesize": "4854401028",
"requires": "2.4.1",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "MPT",
"description": "
<strong>Trained for text completion with no assistant finetuning</strong>
<br>
<ul>
<li>Completion based
<li>Trained by Mosaic ML
<li>Licensed for commercial use
</ul>"
},
{
"order": "k",
"md5sum": "d5eafd5b0bd0d615cfd5fd763f642dfe",
"name": "Nous Vicuna",
"filename": "ggml-nous-gpt4-vicuna-13b.bin",
"filesize": "8136777088",
"ramrequired": "16",
"parameters": "13 billion",
"quant": "q4_0",
"type": "LLaMA",
"description": "
<strong>Trained on ~180,000 instructions</strong>
<br>
<ul>
<li>Instruction based
<li>Trained by Nous Research
<li>Cannot be used commercially
</ul>"
},
{
"order": "l",
"md5sum": "489d21fd48840dcb31e5f92f453f3a20",
"name": "Wizard Uncensored",
"filename": "wizardLM-13B-Uncensored.ggmlv3.q4_0.bin",
"filesize": "8136777088",
"requires": "2.4.7",
"description": "LLaMa 13B finetuned on the uncensored assistant and instruction data.<br>- Cannot be used commercially.",
"ramrequired": "16",
"parameters": "13 billion",
"quant": "q4_0",
"type": "LLaMA",
"description": "
<strong>Trained on uncensored assistant data and instruction data</strong>
<br>
<ul>
<li>Instruction based
<li>Cannot be used commercially
</ul>",
"url": "https://huggingface.co/TheBloke/WizardLM-13B-Uncensored-GGML/resolve/main/wizardLM-13B-Uncensored.ggmlv3.q4_0.bin"
},
{
"order": "m",
"md5sum": "615890cb571fcaa0f70b2f8d15ef809e",
"disableGUI": "true",
"name": "Replit",
"filename": "ggml-replit-code-v1-3b.bin",
"filesize": "5202046853",
"requires": "2.4.7",
"description": "Replit 3B code model trained on subset of the Stack.<br>- Licensed for commercial use.",
"ramrequired": "4",
"parameters": "3 billion",
"quant": "f16",
"type": "Replit",
"description": "
<strong>Trained on a subset of the Stack</strong>
<br>
<ul>
<li>Code completion based
<li>Licensed for commercial use
</ul>",
"url": "https://huggingface.co/nomic-ai/ggml-replit-code-v1-3b/resolve/main/ggml-replit-code-v1-3b.bin"
}
]

484
gpt4all-chat/modellist.cpp Normal file
View File

@ -0,0 +1,484 @@
#include "modellist.h"
#include <algorithm>
bool InstalledModels::filterAcceptsRow(int sourceRow,
const QModelIndex &sourceParent) const
{
QModelIndex index = sourceModel()->index(sourceRow, 0, sourceParent);
bool isInstalled = sourceModel()->data(index, ModelList::InstalledRole).toBool();
return isInstalled;
}
DownloadableModels::DownloadableModels(QObject *parent)
: QSortFilterProxyModel(parent)
, m_expanded(false)
, m_limit(5)
{
connect(this, &DownloadableModels::rowsInserted, this, &DownloadableModels::countChanged);
connect(this, &DownloadableModels::rowsRemoved, this, &DownloadableModels::countChanged);
connect(this, &DownloadableModels::modelReset, this, &DownloadableModels::countChanged);
connect(this, &DownloadableModels::layoutChanged, this, &DownloadableModels::countChanged);
}
bool DownloadableModels::filterAcceptsRow(int sourceRow,
const QModelIndex &sourceParent) const
{
bool withinLimit = sourceRow < (m_expanded ? sourceModel()->rowCount() : m_limit);
QModelIndex index = sourceModel()->index(sourceRow, 0, sourceParent);
bool isDownloadable = !sourceModel()->data(index, ModelList::DescriptionRole).toString().isEmpty();
bool showInGUI = !sourceModel()->data(index, ModelList::DisableGUIRole).toBool();
return withinLimit && isDownloadable && showInGUI;
}
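In effect, with the constructor defaults above (m_limit = 5, m_expanded = false), only the first five source rows that carry a non-empty description and are not flagged disableGUI pass the filter; calling setExpanded(true) lifts the row limit so the full downloadable list shows.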
int DownloadableModels::count() const
{
return rowCount();
}
bool DownloadableModels::isExpanded() const
{
return m_expanded;
}
void DownloadableModels::setExpanded(bool expanded)
{
if (m_expanded != expanded) {
m_expanded = expanded;
invalidateFilter();
emit expandedChanged(m_expanded);
}
}
class MyModelList: public ModelList { };
Q_GLOBAL_STATIC(MyModelList, modelListInstance)
ModelList *ModelList::globalInstance()
{
return modelListInstance();
}
ModelList::ModelList()
: QAbstractListModel(nullptr)
, m_installedModels(new InstalledModels(this))
, m_downloadableModels(new DownloadableModels(this))
{
m_installedModels->setSourceModel(this);
m_downloadableModels->setSourceModel(this);
m_watcher = new QFileSystemWatcher(this);
QSettings settings;
settings.sync();
m_localModelsPath = settings.value("modelPath", defaultLocalModelsPath()).toString();
const QString exePath = QCoreApplication::applicationDirPath() + QDir::separator();
m_watcher->addPath(exePath);
m_watcher->addPath(m_localModelsPath);
connect(m_watcher, &QFileSystemWatcher::directoryChanged, this, &ModelList::updateModelsFromDirectory);
updateModelsFromDirectory();
}
QString ModelList::incompleteDownloadPath(const QString &modelFile)
{
return localModelsPath() + "incomplete-" + modelFile;
}
const QList<ModelInfo> ModelList::exportModelList() const
{
QMutexLocker locker(&m_mutex);
QList<ModelInfo> infos;
for (ModelInfo *info : m_models)
if (info->installed)
infos.append(*info);
return infos;
}
const QList<QString> ModelList::userDefaultModelList() const
{
QMutexLocker locker(&m_mutex);
QSettings settings;
settings.sync();
const QString userDefaultModelName = settings.value("userDefaultModel").toString();
QList<QString> models;
bool foundUserDefault = false;
for (ModelInfo *info : m_models) {
if (info->installed && (info->name == userDefaultModelName || info->filename == userDefaultModelName)) {
foundUserDefault = true;
models.prepend(info->name.isEmpty() ? info->filename : info->name);
} else if (info->installed) {
models.append(info->name.isEmpty() ? info->filename : info->name);
}
}
const QString defaultFileName = "Application default";
if (foundUserDefault)
models.append(defaultFileName);
else
models.prepend(defaultFileName);
return models;
}
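For example (model names illustrative): if Hermes and Groovy are installed and the userDefaultModel setting names Groovy, the returned list is Groovy, Hermes, "Application default"; if the setting matches no installed model, "Application default" is prepended and appears first.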
ModelInfo ModelList::defaultModelInfo() const
{
QMutexLocker locker(&m_mutex);
QSettings settings;
settings.sync();
// The user default model can be set by the user in the settings dialog. The "default" user
// default model is "Application default", which signals we should use the default model that was
// specified by the models.json file.
const QString defaultModelName = settings.value("userDefaultModel").toString();
const bool hasDefaultName = !defaultModelName.isEmpty() && defaultModelName != "Application default";
ModelInfo *defaultModel = nullptr;
for (ModelInfo *info : m_models) {
if (!info->installed)
continue;
defaultModel = info;
if (!hasDefaultName && defaultModel->isDefault) break;
if (hasDefaultName && (defaultModel->name == defaultModelName || defaultModel->filename == defaultModelName)) break;
}
if (defaultModel)
return *defaultModel;
return ModelInfo();
}
bool ModelList::contains(const QString &filename) const
{
QMutexLocker locker(&m_mutex);
return m_modelMap.contains(filename);
}
bool ModelList::lessThan(const ModelInfo* a, const ModelInfo* b)
{
// Rule 1: Non-empty 'order' before empty
if (a->order.isEmpty() != b->order.isEmpty()) {
return !a->order.isEmpty();
}
// Rule 2: Both 'order' are non-empty, sort alphanumerically
if (!a->order.isEmpty() && !b->order.isEmpty()) {
return a->order < b->order;
}
// Rule 3: Both 'order' are empty, sort by filename
return a->filename < b->filename;
}
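So an entry with order "a" sorts ahead of one with order "b", and both sort ahead of any entry without an order; two entries with no order fall back to plain filename comparison.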
void ModelList::addModel(const QString &filename)
{
QMutexLocker locker(&m_mutex);
Q_ASSERT(!m_modelMap.contains(filename));
if (m_modelMap.contains(filename)) {
qWarning() << "ERROR: model list already contains" << filename;
return;
}
beginInsertRows(QModelIndex(), m_models.size(), m_models.size());
ModelInfo *info = new ModelInfo;
info->filename = filename;
m_models.append(info);
m_modelMap.insert(filename, info);
endInsertRows();
std::stable_sort(m_models.begin(), m_models.end(), ModelList::lessThan);
emit dataChanged(index(0, 0), index(m_models.size() - 1, 0));
emit userDefaultModelListChanged();
}
int ModelList::rowCount(const QModelIndex &parent) const
{
Q_UNUSED(parent)
QMutexLocker locker(&m_mutex);
return m_models.size();
}
QVariant ModelList::dataInternal(const ModelInfo *info, int role) const
{
switch (role) {
case NameRole:
return info->name;
case FilenameRole:
return info->filename;
case DirpathRole:
return info->dirpath;
case FilesizeRole:
return info->filesize;
case Md5sumRole:
return info->md5sum;
case CalcHashRole:
return info->calcHash;
case InstalledRole:
return info->installed;
case DefaultRole:
return info->isDefault;
case ChatGPTRole:
return info->isChatGPT;
case DisableGUIRole:
return info->disableGUI;
case DescriptionRole:
return info->description;
case RequiresVersionRole:
return info->requiresVersion;
case DeprecatedVersionRole:
return info->deprecatedVersion;
case UrlRole:
return info->url;
case BytesReceivedRole:
return info->bytesReceived;
case BytesTotalRole:
return info->bytesTotal;
case TimestampRole:
return info->timestamp;
case SpeedRole:
return info->speed;
case DownloadingRole:
return info->isDownloading;
case IncompleteRole:
return info->isIncomplete;
case DownloadErrorRole:
return info->downloadError;
case OrderRole:
return info->order;
case RamrequiredRole:
return info->ramrequired;
case ParametersRole:
return info->parameters;
case QuantRole:
return info->quant;
case TypeRole:
return info->type;
}
return QVariant();
}
QVariant ModelList::data(const QString &filename, int role) const
{
QMutexLocker locker(&m_mutex);
ModelInfo *info = m_modelMap.value(filename);
if (!info) // value() returns nullptr for an unknown filename; don't dereference it
return QVariant();
return dataInternal(info, role);
}
QVariant ModelList::data(const QModelIndex &index, int role) const
{
QMutexLocker locker(&m_mutex);
if (!index.isValid() || index.row() < 0 || index.row() >= m_models.size())
return QVariant();
const ModelInfo *info = m_models.at(index.row());
return dataInternal(info, role);
}
void ModelList::updateData(const QString &filename, int role, const QVariant &value)
{
QMutexLocker locker(&m_mutex);
if (!m_modelMap.contains(filename)) {
qWarning() << "ERROR: cannot update as model map does not contain" << filename;
return;
}
ModelInfo *info = m_modelMap.value(filename);
const int index = m_models.indexOf(info);
if (index == -1) {
qWarning() << "ERROR: cannot update as model list does not contain" << filename;
return;
}
switch (role) {
case NameRole:
info->name = value.toString(); break;
case FilenameRole:
info->filename = value.toString(); break;
case DirpathRole:
info->dirpath = value.toString(); break;
case FilesizeRole:
info->filesize = value.toString(); break;
case Md5sumRole:
info->md5sum = value.toByteArray(); break;
case CalcHashRole:
info->calcHash = value.toBool(); break;
case InstalledRole:
info->installed = value.toBool(); break;
case DefaultRole:
info->isDefault = value.toBool(); break;
case ChatGPTRole:
info->isChatGPT = value.toBool(); break;
case DisableGUIRole:
info->disableGUI = value.toBool(); break;
case DescriptionRole:
info->description = value.toString(); break;
case RequiresVersionRole:
info->requiresVersion = value.toString(); break;
case DeprecatedVersionRole:
info->deprecatedVersion = value.toString(); break;
case UrlRole:
info->url = value.toString(); break;
case BytesReceivedRole:
info->bytesReceived = value.toLongLong(); break;
case BytesTotalRole:
info->bytesTotal = value.toLongLong(); break;
case TimestampRole:
info->timestamp = value.toLongLong(); break;
case SpeedRole:
info->speed = value.toString(); break;
case DownloadingRole:
info->isDownloading = value.toBool(); break;
case IncompleteRole:
info->isIncomplete = value.toBool(); break;
case DownloadErrorRole:
info->downloadError = value.toString(); break;
case OrderRole:
info->order = value.toString(); break;
case RamrequiredRole:
info->ramrequired = value.toInt(); break;
case ParametersRole:
info->parameters = value.toString(); break;
case QuantRole:
info->quant = value.toString(); break;
case TypeRole:
info->type = value.toString(); break;
}
// Extra guarantee that these always remain in sync with the filesystem
QFileInfo fileInfo(info->dirpath + info->filename);
if (info->installed != fileInfo.exists()) {
info->installed = fileInfo.exists();
emit dataChanged(createIndex(index, 0), createIndex(index, 0), {InstalledRole});
}
QFileInfo incompleteInfo(incompleteDownloadPath(info->filename));
if (info->isIncomplete != incompleteInfo.exists()) {
info->isIncomplete = incompleteInfo.exists();
emit dataChanged(createIndex(index, 0), createIndex(index, 0), {IncompleteRole});
}
std::stable_sort(m_models.begin(), m_models.end(), ModelList::lessThan);
emit dataChanged(createIndex(0, 0), createIndex(m_models.size() - 1, 0));
emit userDefaultModelListChanged();
}
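Callers update one role at a time; the download handlers earlier in this commit drive it exactly this way, e.g.:

ModelList::globalInstance()->updateData(modelFilename, ModelList::DownloadingRole, false);
ModelList::globalInstance()->updateData(modelFilename, ModelList::DownloadErrorRole, QString());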
ModelInfo ModelList::modelInfo(const QString &filename) const
{
QMutexLocker locker(&m_mutex);
if (!m_modelMap.contains(filename))
return ModelInfo();
return *m_modelMap.value(filename);
}
QString ModelList::defaultLocalModelsPath() const
{
QString localPath = QStandardPaths::writableLocation(QStandardPaths::AppLocalDataLocation)
+ "/";
QString testWritePath = localPath + QString("test_write.txt");
QString canonicalLocalPath = QFileInfo(localPath).canonicalFilePath() + "/";
QDir localDir(localPath);
if (!localDir.exists()) {
if (!localDir.mkpath(localPath)) {
qWarning() << "ERROR: Local download directory can't be created:" << canonicalLocalPath;
return canonicalLocalPath;
}
}
if (QFileInfo::exists(testWritePath))
return canonicalLocalPath;
QFile testWriteFile(testWritePath);
if (testWriteFile.open(QIODeviceBase::ReadWrite)) {
testWriteFile.close();
return canonicalLocalPath;
}
qWarning() << "ERROR: Local download path appears not writeable:" << canonicalLocalPath;
return canonicalLocalPath;
}
QString ModelList::localModelsPath() const
{
return m_localModelsPath;
}
void ModelList::setLocalModelsPath(const QString &modelPath)
{
QString filePath = (modelPath.startsWith("file://") ?
QUrl(modelPath).toLocalFile() : modelPath);
QString canonical = QFileInfo(filePath).canonicalFilePath() + "/";
if (m_localModelsPath != canonical) {
m_localModelsPath = canonical;
emit localModelsPathChanged();
}
}
QString ModelList::modelDirPath(const QString &modelName, bool isChatGPT)
{
QVector<QString> possibleFilePaths;
if (isChatGPT)
possibleFilePaths << "/" + modelName + ".txt";
else {
possibleFilePaths << "/ggml-" + modelName + ".bin";
possibleFilePaths << "/" + modelName + ".bin";
}
for (const QString &modelFilename : possibleFilePaths) {
QString appPath = QCoreApplication::applicationDirPath() + modelFilename;
QFileInfo infoAppPath(appPath);
if (infoAppPath.exists())
return QCoreApplication::applicationDirPath();
QString downloadPath = localModelsPath() + modelFilename;
QFileInfo infoLocalPath(downloadPath);
if (infoLocalPath.exists())
return localModelsPath();
}
return QString();
}
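For a non-ChatGPT model named, say, mpt-7b-chat (name hypothetical), the probe order is applicationDirPath()/ggml-mpt-7b-chat.bin, then applicationDirPath()/mpt-7b-chat.bin, then the same two filenames under localModelsPath(); the first directory containing a match is returned, and an empty string means the model is not on disk.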
void ModelList::updateModelsFromDirectory()
{
const QString exePath = QCoreApplication::applicationDirPath() + QDir::separator();
const QString localPath = localModelsPath();
{
QDir dir(exePath);
QStringList allFiles = dir.entryList(QDir::Files);
// All files that end with .bin and have 'ggml' somewhere in the name
QStringList fileNames;
for(const QString& filename : allFiles) {
if (filename.endsWith(".bin") && filename.contains("ggml")) {
fileNames.append(filename);
}
}
for (const QString& f : fileNames) {
QString filePath = exePath + f;
QFileInfo info(filePath);
if (!info.exists())
continue;
if (!contains(f))
addModel(f);
updateData(f, DirpathRole, exePath);
updateData(f, FilesizeRole, toFileSize(info.size()));
}
}
if (localPath != exePath) {
QDir dir(localPath);
QStringList allFiles = dir.entryList(QDir::Files);
QStringList fileNames;
for(const QString& filename : allFiles) {
if ((filename.endsWith(".bin") && filename.contains("ggml"))
|| (filename.endsWith(".txt") && filename.startsWith("chatgpt-"))) {
fileNames.append(filename);
}
}
for (const QString& f : fileNames) {
QString filePath = localPath + f;
QFileInfo info(filePath);
if (!info.exists())
continue;
if (!contains(f))
addModel(f);
updateData(f, ChatGPTRole, f.startsWith("chatgpt-"));
updateData(f, DirpathRole, localPath);
updateData(f, FilesizeRole, toFileSize(info.size()));
}
}
}
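With these filters, a file such as ggml-gpt4all-j-v1.3-groovy.bin is picked up from either directory, a chatgpt-*.txt file is picked up only from the local models path, and an arbitrary model.bin without "ggml" in its name is ignored.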

246
gpt4all-chat/modellist.h Normal file
View File

@ -0,0 +1,246 @@
#ifndef MODELLIST_H
#define MODELLIST_H
#include <QAbstractListModel>
#include <QtQml>
struct ModelInfo {
Q_GADGET
Q_PROPERTY(QString name MEMBER name)
Q_PROPERTY(QString filename MEMBER filename)
Q_PROPERTY(QString dirpath MEMBER dirpath)
Q_PROPERTY(QString filesize MEMBER filesize)
Q_PROPERTY(QByteArray md5sum MEMBER md5sum)
Q_PROPERTY(bool calcHash MEMBER calcHash)
Q_PROPERTY(bool installed MEMBER installed)
Q_PROPERTY(bool isDefault MEMBER isDefault)
Q_PROPERTY(bool disableGUI MEMBER disableGUI)
Q_PROPERTY(bool isChatGPT MEMBER isChatGPT)
Q_PROPERTY(QString description MEMBER description)
Q_PROPERTY(QString requiresVersion MEMBER requiresVersion)
Q_PROPERTY(QString deprecatedVersion MEMBER deprecatedVersion)
Q_PROPERTY(QString url MEMBER url)
Q_PROPERTY(qint64 bytesReceived MEMBER bytesReceived)
Q_PROPERTY(qint64 bytesTotal MEMBER bytesTotal)
Q_PROPERTY(qint64 timestamp MEMBER timestamp)
Q_PROPERTY(QString speed MEMBER speed)
Q_PROPERTY(bool isDownloading MEMBER isDownloading)
Q_PROPERTY(bool isIncomplete MEMBER isIncomplete)
Q_PROPERTY(QString downloadError MEMBER downloadError)
Q_PROPERTY(QString order MEMBER order)
Q_PROPERTY(int ramrequired MEMBER ramrequired)
Q_PROPERTY(QString parameters MEMBER parameters)
Q_PROPERTY(QString quant MEMBER quant)
Q_PROPERTY(QString type MEMBER type)
public:
QString name;
QString filename;
QString dirpath;
QString filesize;
QByteArray md5sum;
bool calcHash = false;
bool installed = false;
bool isDefault = false;
bool isChatGPT = false;
bool disableGUI = false;
QString description;
QString requiresVersion;
QString deprecatedVersion;
QString url;
qint64 bytesReceived = 0;
qint64 bytesTotal = 0;
qint64 timestamp = 0;
QString speed;
bool isDownloading = false;
bool isIncomplete = false;
QString downloadError;
QString order;
int ramrequired = 0;
QString parameters;
QString quant;
QString type;
bool operator==(const ModelInfo &other) const {
return filename == other.filename;
}
};
Q_DECLARE_METATYPE(ModelInfo)
class InstalledModels : public QSortFilterProxyModel
{
Q_OBJECT
public:
explicit InstalledModels(QObject *parent) : QSortFilterProxyModel(parent) {}
protected:
bool filterAcceptsRow(int sourceRow, const QModelIndex &sourceParent) const override;
};
class DownloadableModels : public QSortFilterProxyModel
{
Q_OBJECT
Q_PROPERTY(int count READ count NOTIFY countChanged)
Q_PROPERTY(bool expanded READ isExpanded WRITE setExpanded NOTIFY expandedChanged)
public:
explicit DownloadableModels(QObject *parent);
int count() const;
bool isExpanded() const;
void setExpanded(bool expanded);
Q_SIGNALS:
void countChanged();
protected:
bool filterAcceptsRow(int sourceRow, const QModelIndex &sourceParent) const override;
Q_SIGNALS:
void expandedChanged(bool expanded);
private:
bool m_expanded;
int m_limit;
};
class ModelList : public QAbstractListModel
{
Q_OBJECT
Q_PROPERTY(int count READ count NOTIFY countChanged)
Q_PROPERTY(QString localModelsPath READ localModelsPath WRITE setLocalModelsPath NOTIFY localModelsPathChanged)
Q_PROPERTY(InstalledModels* installedModels READ installedModels NOTIFY installedModelsChanged)
Q_PROPERTY(DownloadableModels* downloadableModels READ downloadableModels NOTIFY downloadableModelsChanged)
Q_PROPERTY(QList<QString> userDefaultModelList READ userDefaultModelList NOTIFY userDefaultModelListChanged)
public:
static ModelList *globalInstance();
enum Roles {
NameRole = Qt::UserRole + 1,
FilenameRole,
DirpathRole,
FilesizeRole,
Md5sumRole,
CalcHashRole,
InstalledRole,
DefaultRole,
ChatGPTRole,
DisableGUIRole,
DescriptionRole,
RequiresVersionRole,
DeprecatedVersionRole,
UrlRole,
BytesReceivedRole,
BytesTotalRole,
TimestampRole,
SpeedRole,
DownloadingRole,
IncompleteRole,
DownloadErrorRole,
OrderRole,
RamrequiredRole,
ParametersRole,
QuantRole,
TypeRole
};
QHash<int, QByteArray> roleNames() const override
{
QHash<int, QByteArray> roles;
roles[NameRole] = "name";
roles[FilenameRole] = "filename";
roles[DirpathRole] = "dirpath";
roles[FilesizeRole] = "filesize";
roles[Md5sumRole] = "md5sum";
roles[CalcHashRole] = "calcHash";
roles[InstalledRole] = "installed";
roles[DefaultRole] = "isDefault";
roles[ChatGPTRole] = "isChatGPT";
roles[DisableGUIRole] = "disableGUI";
roles[DescriptionRole] = "description";
roles[RequiresVersionRole] = "requiresVersion";
roles[DeprecatedVersionRole] = "deprecatedVersion";
roles[UrlRole] = "url";
roles[BytesReceivedRole] = "bytesReceived";
roles[BytesTotalRole] = "bytesTotal";
roles[TimestampRole] = "timestamp";
roles[SpeedRole] = "speed";
roles[DownloadingRole] = "isDownloading";
roles[IncompleteRole] = "isIncomplete";
roles[DownloadErrorRole] = "downloadError";
roles[OrderRole] = "order";
roles[RamrequiredRole] = "ramrequired";
roles[ParametersRole] = "parameters";
roles[QuantRole] = "quant";
roles[TypeRole] = "type";
return roles;
}
int rowCount(const QModelIndex &parent = QModelIndex()) const override;
QVariant data(const QModelIndex &index, int role = Qt::DisplayRole) const override;
QVariant data(const QString &filename, int role) const;
void updateData(const QString &filename, int role, const QVariant &value);
int count() const { return m_models.size(); }
bool contains(const QString &filename) const;
Q_INVOKABLE ModelInfo modelInfo(const QString &filename) const;
ModelInfo defaultModelInfo() const;
void addModel(const QString &filename);
Q_INVOKABLE QString defaultLocalModelsPath() const;
Q_INVOKABLE QString localModelsPath() const;
Q_INVOKABLE void setLocalModelsPath(const QString &modelPath);
const QList<ModelInfo> exportModelList() const;
const QList<QString> userDefaultModelList() const;
InstalledModels *installedModels() const { return m_installedModels; }
DownloadableModels *downloadableModels() const { return m_downloadableModels; }
static inline QString toFileSize(quint64 sz) {
if (sz < 1024) {
return QString("%1 bytes").arg(sz);
} else if (sz < 1024 * 1024) {
return QString("%1 KB").arg(qreal(sz) / 1024, 0, 'g', 3);
} else if (sz < 1024 * 1024 * 1024) {
return QString("%1 MB").arg(qreal(sz) / (1024 * 1024), 0, 'g', 3);
} else {
return QString("%1 GB").arg(qreal(sz) / (1024 * 1024 * 1024), 0, 'g', 3);
}
}
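// Example: toFileSize(4854401050) returns "4.52 GB"
// (4854401050 / 1024^3 ≈ 4.52, printed to 3 significant digits).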
QString incompleteDownloadPath(const QString &modelFile);
Q_SIGNALS:
void countChanged();
void localModelsPathChanged();
void installedModelsChanged();
void downloadableModelsChanged();
void userDefaultModelListChanged();
private Q_SLOTS:
void updateModelsFromDirectory();
private:
QString modelDirPath(const QString &modelName, bool isChatGPT);
int indexForModel(ModelInfo *model);
QVariant dataInternal(const ModelInfo *info, int role) const;
static bool lessThan(const ModelInfo* a, const ModelInfo* b);
private:
mutable QRecursiveMutex m_mutex;
InstalledModels *m_installedModels;
DownloadableModels *m_downloadableModels;
QList<ModelInfo*> m_models;
QHash<QString, ModelInfo*> m_modelMap;
QString m_localModelsPath;
QFileSystemWatcher *m_watcher;
private:
explicit ModelList();
~ModelList() {}
friend class MyModelList;
};
#endif // MODELLIST_H

View File

@ -1,5 +1,5 @@
#include "network.h"
#include "llm.h"
#include "chatlistmodel.h"
#include "sysinfo.h"
#include <QCoreApplication>
@ -97,10 +97,10 @@ bool Network::packageAndSendJson(const QString &ingestId, const QString &json)
}
Q_ASSERT(doc.isObject());
Q_ASSERT(LLM::globalInstance()->chatListModel()->currentChat());
Q_ASSERT(ChatListModel::globalInstance()->currentChat());
QJsonObject object = doc.object();
object.insert("source", "gpt4all-chat");
object.insert("agent_id", LLM::globalInstance()->chatListModel()->currentChat()->modelName());
object.insert("agent_id", ChatListModel::globalInstance()->currentChat()->modelInfo().filename);
object.insert("submitter_id", m_uniqueId);
object.insert("ingest_id", ingestId);
@ -394,7 +394,7 @@ void Network::sendMixpanelEvent(const QString &ev, const QVector<KeyValue> &valu
if (!m_usageStatsActive)
return;
Q_ASSERT(LLM::globalInstance()->chatListModel()->currentChat());
Q_ASSERT(ChatListModel::globalInstance()->currentChat());
QJsonObject properties;
properties.insert("token", "ce362e568ddaee16ed243eaffb5860a2");
properties.insert("time", QDateTime::currentSecsSinceEpoch());
@ -405,13 +405,13 @@ void Network::sendMixpanelEvent(const QString &ev, const QVector<KeyValue> &valu
properties.insert("ip", m_ipify);
properties.insert("name", QCoreApplication::applicationName() + " v"
+ QCoreApplication::applicationVersion());
properties.insert("model", LLM::globalInstance()->chatListModel()->currentChat()->modelName());
properties.insert("model", ChatListModel::globalInstance()->currentChat()->modelInfo().filename);
// Some additional startup information
if (ev == "startup") {
const QSize display = QGuiApplication::primaryScreen()->size();
properties.insert("display", QString("%1x%2").arg(display.width()).arg(display.height()));
properties.insert("ram", getSystemTotalRAM());
properties.insert("ram", getSystemTotalRAMInGB());
#if defined(Q_OS_MAC)
properties.insert("cpu", QString::fromStdString(getCPUModel()));
#endif

View File

@ -3,6 +3,7 @@ import QtQuick
import QtQuick.Controls
import QtQuick.Controls.Basic
import QtQuick.Layouts
import chatlistmodel
import llm
import download
import network
@ -48,8 +49,8 @@ Drawer {
color: newChat.hovered ? theme.backgroundDark : theme.backgroundDarkest
}
onClicked: {
LLM.chatListModel.addChat();
Network.sendNewChat(LLM.chatListModel.count)
ChatListModel.addChat();
Network.sendNewChat(ChatListModel.count)
}
}
@ -69,15 +70,15 @@ Drawer {
anchors.fill: parent
anchors.rightMargin: 10
model: LLM.chatListModel
model: ChatListModel
delegate: Rectangle {
id: chatRectangle
width: conversationList.width
height: chatName.height
opacity: 0.9
property bool isCurrent: LLM.chatListModel.currentChat === LLM.chatListModel.get(index)
property bool isServer: LLM.chatListModel.get(index) && LLM.chatListModel.get(index).isServer
property bool isCurrent: ChatListModel.currentChat === ChatListModel.get(index)
property bool isServer: ChatListModel.get(index) && ChatListModel.get(index).isServer
property bool trashQuestionDisplayed: false
visible: !isServer || LLM.serverEnabled
z: isCurrent ? 199 : 1
@ -119,7 +120,7 @@ Drawer {
Network.sendRenameChat()
}
function changeName() {
LLM.chatListModel.get(index).name = chatName.text
ChatListModel.get(index).name = chatName.text
chatName.focus = false
chatName.readOnly = true
chatName.selectByMouse = false
@ -128,7 +129,7 @@ Drawer {
onTapped: {
if (isCurrent)
return;
LLM.chatListModel.currentChat = LLM.chatListModel.get(index);
ChatListModel.currentChat = ChatListModel.get(index);
}
}
Accessible.role: Accessible.Button
@ -201,7 +202,7 @@ Drawer {
color: "transparent"
}
onClicked: {
LLM.chatListModel.removeChat(LLM.chatListModel.get(index))
ChatListModel.removeChat(ChatListModel.get(index))
Network.sendRemoveChat()
}
Accessible.role: Accessible.Button

View File

@ -4,6 +4,7 @@ import QtQuick.Controls
import QtQuick.Controls.Basic
import QtQuick.Layouts
import QtQuick.Dialogs
import chatlistmodel
import localdocs
import llm
@ -15,7 +16,7 @@ Dialog {
width: 480
height: 640
property var currentChat: LLM.chatListModel.currentChat
property var currentChat: ChatListModel.currentChat
background: Rectangle {
anchors.fill: parent

View File

@ -4,15 +4,17 @@ import QtQuick.Controls
import QtQuick.Controls.Basic
import QtQuick.Dialogs
import QtQuick.Layouts
import chatlistmodel
import download
import llm
import modellist
import network
Dialog {
id: modelDownloaderDialog
modal: true
opacity: 0.9
closePolicy: LLM.chatListModel.currentChat.modelList.length === 0 ? Popup.NoAutoClose : (Popup.CloseOnEscape | Popup.CloseOnPressOutside)
closePolicy: ModelList.installedModels.count === 0 ? Popup.NoAutoClose : (Popup.CloseOnEscape | Popup.CloseOnPressOutside)
padding: 20
bottomPadding: 30
background: Rectangle {
@ -27,7 +29,7 @@ Dialog {
Network.sendModelDownloaderDialog();
}
property string defaultModelPath: Download.defaultLocalModelsPath()
property string defaultModelPath: ModelList.defaultLocalModelsPath()
property alias modelPath: settings.modelPath
Settings {
id: settings
@ -35,13 +37,19 @@ Dialog {
}
Component.onCompleted: {
Download.downloadLocalModelsPath = settings.modelPath
ModelList.localModelsPath = settings.modelPath
}
Component.onDestruction: {
settings.sync()
}
PopupDialog {
id: downloadingErrorPopup
anchors.centerIn: parent
shouldTimeOut: false
}
ColumnLayout {
anchors.fill: parent
anchors.margins: 20
@ -56,7 +64,7 @@ Dialog {
}
Label {
visible: !Download.modelList.length
visible: !ModelList.downloadableModels.count
Layout.fillWidth: true
Layout.fillHeight: true
horizontalAlignment: Qt.AlignHCenter
@ -73,323 +81,327 @@ Dialog {
clip: true
ListView {
id: modelList
model: Download.modelList
id: modelListView
model: ModelList.downloadableModels
boundsBehavior: Flickable.StopAtBounds
delegate: Item {
delegate: Rectangle {
id: delegateItem
width: modelList.width
height: modelName.height + modelName.padding
+ description.height + description.padding
objectName: "delegateItem"
property bool downloading: false
Rectangle {
anchors.fill: parent
color: index % 2 === 0 ? theme.backgroundLight : theme.backgroundLighter
}
width: modelListView.width
height: childrenRect.height
color: index % 2 === 0 ? theme.backgroundLight : theme.backgroundLighter
Text {
id: modelName
objectName: "modelName"
property string filename: modelData.filename
text: !modelData.isChatGPT ? (filename.startsWith("ggml-") ? filename.slice(5, filename.length - 4) : filename.slice(0, filename.length - 4)) : filename
padding: 20
anchors.top: parent.top
anchors.left: parent.left
font.bold: modelData.isDefault || modelData.bestGPTJ || modelData.bestLlama || modelData.bestMPT
color: theme.assistantColor
Accessible.role: Accessible.Paragraph
Accessible.name: qsTr("Model file")
Accessible.description: qsTr("Model file to be downloaded")
}
GridLayout {
columns: 2
width: parent.width
Text {
id: description
text: " - " + modelData.description
leftPadding: 20
rightPadding: 20
anchors.top: modelName.bottom
anchors.left: modelName.left
anchors.right: parent.right
wrapMode: Text.WordWrap
textFormat: Text.StyledText
color: theme.textColor
linkColor: theme.textColor
Accessible.role: Accessible.Paragraph
Accessible.name: qsTr("Description")
Accessible.description: qsTr("The description of the file")
onLinkActivated: Qt.openUrlExternally(link)
}
Text {
id: isDefault
text: qsTr("(default)")
visible: modelData.isDefault
anchors.top: modelName.top
anchors.left: modelName.right
padding: 20
color: theme.textColor
Accessible.role: Accessible.Paragraph
Accessible.name: qsTr("Default file")
Accessible.description: qsTr("Whether the file is the default model")
}
Text {
text: modelData.filesize
anchors.top: modelName.top
anchors.left: isDefault.visible ? isDefault.right : modelName.right
padding: 20
color: theme.textColor
Accessible.role: Accessible.Paragraph
Accessible.name: qsTr("File size")
Accessible.description: qsTr("The size of the file")
}
Label {
id: speedLabel
anchors.top: modelName.top
anchors.right: itemProgressBar.left
padding: 20
objectName: "speedLabel"
color: theme.textColor
text: ""
visible: downloading
Accessible.role: Accessible.Paragraph
Accessible.name: qsTr("Download speed")
Accessible.description: qsTr("Download speed in bytes/kilobytes/megabytes per second")
}
ProgressBar {
id: itemProgressBar
objectName: "itemProgressBar"
anchors.top: modelName.top
anchors.right: downloadButton.left
anchors.topMargin: 20
anchors.rightMargin: 20
width: 100
visible: downloading
background: Rectangle {
implicitWidth: 200
implicitHeight: 30
color: theme.backgroundDarkest
radius: 3
}
contentItem: Item {
implicitWidth: 200
implicitHeight: 25
Rectangle {
width: itemProgressBar.visualPosition * parent.width
height: parent.height
radius: 2
color: theme.assistantColor
}
}
Accessible.role: Accessible.ProgressBar
Accessible.name: qsTr("Download progressBar")
Accessible.description: qsTr("Shows the progress made in the download")
}
Item {
visible: modelData.calcHash
anchors.top: modelName.top
anchors.right: parent.right
Label {
id: calcHashLabel
anchors.right: busyCalcHash.left
padding: 20
objectName: "calcHashLabel"
color: theme.textColor
text: qsTr("Calculating MD5...")
Text {
textFormat: Text.StyledText
text: "<h2>" + (name !== "" ? name : filename) + "</h2>"
Layout.row: 0
Layout.column: 0
Layout.topMargin: 20
Layout.leftMargin: 20
Layout.columnSpan: 2
color: theme.assistantColor
Accessible.role: Accessible.Paragraph
Accessible.name: text
Accessible.description: qsTr("Whether the file hash is being calculated")
Accessible.name: qsTr("Model file")
Accessible.description: qsTr("Model file to be downloaded")
}
MyBusyIndicator {
id: busyCalcHash
anchors.right: parent.right
padding: 20
running: modelData.calcHash
Accessible.role: Accessible.Animation
Accessible.name: qsTr("Busy indicator")
Accessible.description: qsTr("Displayed when the file hash is being calculated")
}
}
Rectangle {
id: actionBox
width: childrenRect.width + 20
color: theme.backgroundDark
border.width: 1
radius: 10
Layout.row: 1
Layout.column: 1
Layout.rightMargin: 20
Layout.bottomMargin: 20
Layout.fillHeight: true
Layout.minimumHeight: childrenRect.height + 20
Layout.alignment: Qt.AlignRight | Qt.AlignTop
Layout.rowSpan: 2
Item {
anchors.top: modelName.top
anchors.topMargin: 15
anchors.right: parent.right
visible: modelData.installed
Label {
id: installedLabel
anchors.verticalCenter: removeButton.verticalCenter
anchors.right: removeButton.left
anchors.rightMargin: 15
objectName: "installedLabel"
color: theme.textColor
text: qsTr("Already installed")
Accessible.role: Accessible.Paragraph
Accessible.name: text
Accessible.description: qsTr("Whether the file is already installed on your system")
}
MyButton {
id: removeButton
text: "Remove"
anchors.right: parent.right
anchors.rightMargin: 20
Accessible.description: qsTr("Remove button to remove model from filesystem")
onClicked: {
Download.removeModel(modelData.filename);
}
}
}
Item {
visible: modelData.isChatGPT && !modelData.installed
anchors.top: modelName.top
anchors.topMargin: 15
anchors.right: parent.right
TextField {
id: openaiKey
anchors.right: installButton.left
anchors.rightMargin: 15
color: theme.textColor
background: Rectangle {
color: theme.backgroundLighter
radius: 10
}
placeholderText: qsTr("enter $OPENAI_API_KEY")
placeholderTextColor: theme.backgroundLightest
Accessible.role: Accessible.EditableText
Accessible.name: placeholderText
Accessible.description: qsTr("Whether the file hash is being calculated")
}
Button {
id: installButton
contentItem: Text {
color: openaiKey.text === "" ? theme.backgroundLightest : theme.textColor
text: "Install"
}
enabled: openaiKey.text !== ""
anchors.right: parent.right
anchors.rightMargin: 20
background: Rectangle {
opacity: .5
border.color: theme.backgroundLightest
border.width: 1
radius: 10
color: theme.backgroundLight
}
onClicked: {
Download.installModel(modelData.filename, openaiKey.text);
}
Accessible.role: Accessible.Button
Accessible.name: qsTr("Install button")
Accessible.description: qsTr("Install button to install chatgpt model")
}
}
MyButton {
id: downloadButton
text: downloading ? qsTr("Cancel") : qsTr("Download")
anchors.top: modelName.top
anchors.right: parent.right
anchors.topMargin: 15
anchors.rightMargin: 20
visible: !modelData.isChatGPT && !modelData.installed && !modelData.calcHash
Accessible.description: qsTr("Cancel/Download button to stop/start the download")
onClicked: {
if (!downloading) {
downloading = true;
Download.downloadModel(modelData.filename);
} else {
downloading = false;
Download.cancelDownload(modelData.filename);
}
}
}
}
Component.onCompleted: {
Download.downloadProgress.connect(updateProgress);
Download.downloadFinished.connect(resetProgress);
}
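                // Per-model {timestamp, bytesReceived} snapshots used to compute download speed between progress signals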
property var lastUpdate: ({})
function updateProgress(bytesReceived, bytesTotal, modelName) {
let currentTime = new Date().getTime();
for (let i = 0; i < modelList.contentItem.children.length; i++) {
let delegateItem = modelList.contentItem.children[i];
if (delegateItem.objectName === "delegateItem") {
let modelNameText = delegateItem.children.find(child => child.objectName === "modelName").filename;
if (modelNameText === modelName) {
let progressBar = delegateItem.children.find(child => child.objectName === "itemProgressBar");
progressBar.value = bytesReceived / bytesTotal;
let updated = false;
// Calculate the download speed
if (lastUpdate[modelName] && lastUpdate[modelName].timestamp) {
let timeDifference = currentTime - lastUpdate[modelName].timestamp;
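                                // Refresh the speed label at most every 1.5 seconds so the reading stays stable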
if (timeDifference >= 1500) {
let bytesDifference = bytesReceived - lastUpdate[modelName].bytesReceived;
let speed = (bytesDifference / timeDifference) * 1000; // bytes per second
delegateItem.downloading = true
// Update the speed label
let speedLabel = delegateItem.children.find(child => child.objectName === "speedLabel");
if (speed < 1024) {
speedLabel.text = speed.toFixed(2) + " B/s";
} else if (speed < 1024 * 1024) {
speedLabel.text = (speed / 1024).toFixed(2) + " KB/s";
} else {
speedLabel.text = (speed / (1024 * 1024)).toFixed(2) + " MB/s";
}
updated = true;
ColumnLayout {
spacing: 0
MyButton {
id: downloadButton
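                            // Tri-state action: cancel an active download, resume an interrupted one, or start a new one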
text: isDownloading ? qsTr("Cancel") : isIncomplete ? qsTr("Resume") : qsTr("Download")
Layout.topMargin: 20
Layout.leftMargin: 20
Layout.minimumWidth: openaiKey.width
Layout.fillWidth: true
Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
visible: !isChatGPT && !installed && !calcHash && downloadError === ""
Accessible.description: qsTr("Cancel/Resume/Download button to stop/restart/start the download")
background: Rectangle {
border.color: downloadButton.down ? theme.backgroundLightest : theme.buttonBorder
border.width: 2
radius: 10
color: downloadButton.hovered ? theme.backgroundDark : theme.backgroundDarkest
}
onClicked: {
if (!isDownloading) {
Download.downloadModel(filename);
} else {
Download.cancelDownload(filename);
}
}
} else {
updated = true; // To get an initial entry in lastUpdate
}
// Update the lastUpdate object for the current model
if (updated) {
lastUpdate[modelName] = {"timestamp": currentTime, "bytesReceived": bytesReceived};
MyButton {
id: removeButton
text: qsTr("Remove")
Layout.topMargin: 20
Layout.leftMargin: 20
Layout.minimumWidth: openaiKey.width
Layout.fillWidth: true
Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
visible: installed || downloadError !== ""
Accessible.description: qsTr("Remove button to remove model from filesystem")
background: Rectangle {
border.color: removeButton.down ? theme.backgroundLightest : theme.buttonBorder
border.width: 2
radius: 10
color: removeButton.hovered ? theme.backgroundDark : theme.backgroundDarkest
}
onClicked: {
Download.removeModel(filename);
}
}
MyButton {
id: installButton
visible: !installed && isChatGPT
Layout.topMargin: 20
Layout.leftMargin: 20
Layout.minimumWidth: openaiKey.width
Layout.fillWidth: true
Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
text: qsTr("Install")
background: Rectangle {
border.color: installButton.down ? theme.backgroundLightest : theme.buttonBorder
border.width: 2
radius: 10
color: installButton.hovered ? theme.backgroundDark : theme.backgroundDarkest
}
onClicked: {
if (openaiKey.text === "")
openaiKey.showError();
else
Download.installModel(filename, openaiKey.text);
}
Accessible.role: Accessible.Button
Accessible.name: qsTr("Install button")
                            Accessible.description: qsTr("Install button to install ChatGPT model")
}
ColumnLayout {
spacing: 0
Label {
Layout.topMargin: 20
Layout.leftMargin: 20
textFormat: Text.StyledText
text: "<strong><font size=\"1\">"
+ (qsTr("Status: ")
+ (installed ? qsTr("Installed")
: (downloadError !== "" ? qsTr("<a href=\"#error\">Error</a>")
: (isDownloading ? qsTr("Downloading") : qsTr("Available")))))
                                + "</font></strong>"
color: theme.textColor
Accessible.role: Accessible.Paragraph
Accessible.name: text
Accessible.description: qsTr("Whether the file is already installed on your system")
onLinkActivated: {
downloadingErrorPopup.text = downloadError;
downloadingErrorPopup.open();
}
}
Label {
Layout.leftMargin: 20
textFormat: Text.StyledText
text: "<strong><font size=\"1\">"
+ (qsTr("Download size: ") + filesize)
+ "<br>"
+ (qsTr("RAM required: ") + (ramrequired > 0 ? ramrequired + " GB" : qsTr("minimal")))
+ "<br>"
+ (qsTr("Parameters: ") + parameters)
+ "<br>"
+ (qsTr("Quantization: ") + quant)
+ "<br>"
+ (qsTr("Type: ") + type)
                                + "</font></strong>"
color: theme.textColor
Accessible.role: Accessible.Paragraph
Accessible.name: text
Accessible.description: qsTr("Metadata about the model")
onLinkActivated: {
downloadingErrorPopup.text = downloadError;
downloadingErrorPopup.open();
}
}
Label {
visible: LLM.systemTotalRAMInGB() < ramrequired
Layout.topMargin: 20
Layout.leftMargin: 20
Layout.maximumWidth: openaiKey.width
textFormat: Text.StyledText
text: qsTr("<strong><font size=\"2\">WARNING: Not recommended for your hardware.")
+ qsTr(" Model requires more memory (") + ramrequired
+ qsTr(" GB) than your system has available (")
                                + LLM.systemTotalRAMInGBString() + ").</font></strong>"
color: theme.textErrorColor
wrapMode: Text.WordWrap
Accessible.role: Accessible.Paragraph
Accessible.name: text
Accessible.description: qsTr("Error for incompatible hardware")
onLinkActivated: {
downloadingErrorPopup.text = downloadError;
downloadingErrorPopup.open();
}
}
}
ColumnLayout {
visible: isDownloading && !calcHash
Layout.topMargin: 20
Layout.leftMargin: 20
Layout.minimumWidth: openaiKey.width
Layout.fillWidth: true
Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
spacing: 20
ProgressBar {
id: itemProgressBar
Layout.fillWidth: true
width: openaiKey.width
value: bytesReceived / bytesTotal
background: Rectangle {
implicitHeight: 45
color: theme.backgroundDarkest
radius: 3
}
contentItem: Item {
implicitHeight: 40
Rectangle {
width: itemProgressBar.visualPosition * parent.width
height: parent.height
radius: 2
color: theme.assistantColor
}
}
Accessible.role: Accessible.ProgressBar
                                Accessible.name: qsTr("Download progress bar")
Accessible.description: qsTr("Shows the progress made in the download")
}
Label {
id: speedLabel
color: theme.textColor
Layout.alignment: Qt.AlignRight
text: speed
Accessible.role: Accessible.Paragraph
Accessible.name: qsTr("Download speed")
Accessible.description: qsTr("Download speed in bytes/kilobytes/megabytes per second")
}
}
RowLayout {
visible: calcHash
Layout.topMargin: 20
Layout.leftMargin: 20
Layout.minimumWidth: openaiKey.width
Layout.fillWidth: true
Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
Label {
id: calcHashLabel
color: theme.textColor
text: qsTr("Calculating MD5...")
Accessible.role: Accessible.Paragraph
Accessible.name: text
Accessible.description: qsTr("Whether the file hash is being calculated")
}
MyBusyIndicator {
id: busyCalcHash
running: calcHash
Accessible.role: Accessible.Animation
Accessible.name: qsTr("Busy indicator")
Accessible.description: qsTr("Displayed when the file hash is being calculated")
}
}
MyTextField {
id: openaiKey
visible: !installed && isChatGPT
Layout.topMargin: 20
Layout.leftMargin: 20
Layout.minimumWidth: 150
Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
color: theme.textColor
background: Rectangle {
color: theme.backgroundLighter
radius: 10
}
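                            // Flag a missing key by tinting the placeholder; any edit restores the normal color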
function showError() {
openaiKey.placeholderTextColor = theme.textErrorColor
}
onTextChanged: {
openaiKey.placeholderTextColor = theme.backgroundLightest
}
placeholderText: qsTr("enter $OPENAI_API_KEY")
placeholderTextColor: theme.backgroundLightest
Accessible.role: Accessible.EditableText
Accessible.name: placeholderText
                            Accessible.description: qsTr("Field for entering the OpenAI API key")
}
break;
}
}
Text {
id: descriptionText
text: description
Layout.row: 1
Layout.column: 0
Layout.leftMargin: 20
Layout.bottomMargin: 20
Layout.maximumWidth: modelListView.width - actionBox.width - 60
wrapMode: Text.WordWrap
textFormat: Text.StyledText
color: theme.textColor
linkColor: theme.textColor
Accessible.role: Accessible.Paragraph
Accessible.name: qsTr("Description")
Accessible.description: qsTr("The description of the file")
onLinkActivated: Qt.openUrlExternally(link)
}
}
}
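                // Clears the progress bar, speed label, and speed bookkeeping when a download finishes or is cancelled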
function resetProgress(modelName) {
for (let i = 0; i < modelList.contentItem.children.length; i++) {
let delegateItem = modelList.contentItem.children[i];
if (delegateItem.objectName === "delegateItem") {
let modelNameText = delegateItem.children.find(child => child.objectName === "modelName").filename;
if (modelNameText === modelName) {
let progressBar = delegateItem.children.find(child => child.objectName === "itemProgressBar");
progressBar.value = 0;
delegateItem.downloading = false;
// Remove speed label text
let speedLabel = delegateItem.children.find(child => child.objectName === "speedLabel");
speedLabel.text = "";
// Remove the lastUpdate object for the canceled model
delete lastUpdate[modelName];
break;
footer: Component {
Rectangle {
width: modelListView.width
height: expandButton.height + 80
color: ModelList.downloadableModels.count % 2 === 0 ? theme.backgroundLight : theme.backgroundLighter
MyButton {
id: expandButton
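                // Toggles between the trimmed default list and the full catalog of downloadable models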
anchors.centerIn: parent
padding: 40
text: ModelList.downloadableModels.expanded ? qsTr("Show fewer models") : qsTr("Show more models")
background: Rectangle {
border.color: expandButton.down ? theme.backgroundLightest : theme.buttonBorder
border.width: 2
radius: 10
color: expandButton.hovered ? theme.backgroundDark : theme.backgroundDarkest
}
onClicked: {
ModelList.downloadableModels.expanded = !ModelList.downloadableModels.expanded;
}
}
}
@@ -404,11 +416,11 @@ Dialog {
FolderDialog {
id: modelPathDialog
title: "Please choose a directory"
currentFolder: "file://" + Download.downloadLocalModelsPath
currentFolder: "file://" + ModelList.localModelsPath
onAccepted: {
modelPathDisplayField.text = selectedFolder
Download.downloadLocalModelsPath = modelPathDisplayField.text
settings.modelPath = Download.downloadLocalModelsPath
ModelList.localModelsPath = modelPathDisplayField.text
settings.modelPath = ModelList.localModelsPath
settings.sync()
}
}
@@ -421,7 +433,7 @@ Dialog {
}
MyDirectoryField {
id: modelPathDisplayField
text: Download.downloadLocalModelsPath
text: ModelList.localModelsPath
Layout.fillWidth: true
ToolTip.text: qsTr("Path where model files will be downloaded to")
ToolTip.visible: hovered
@@ -430,11 +442,11 @@ Dialog {
Accessible.description: ToolTip.text
onEditingFinished: {
if (isValid) {
Download.downloadLocalModelsPath = modelPathDisplayField.text
settings.modelPath = Download.downloadLocalModelsPath
ModelList.localModelsPath = modelPathDisplayField.text
settings.modelPath = ModelList.localModelsPath
settings.sync()
} else {
text = Download.downloadLocalModelsPath
text = ModelList.localModelsPath
}
}
}
View File
@@ -23,18 +23,21 @@ Dialog {
anchors.centerIn: parent
spacing: 20
Text {
Label {
id: textField
width: Math.min(1024, implicitWidth)
height: Math.min(600, implicitHeight)
anchors.verticalCenter: shouldShowBusy ? busyIndicator.verticalCenter : parent.verticalCenter
horizontalAlignment: Text.AlignLeft
verticalAlignment: Text.AlignVCenter
textFormat: Text.StyledText
wrapMode: Text.WordWrap
color: theme.textColor
linkColor: theme.linkColor
Accessible.role: Accessible.HelpBalloon
Accessible.name: text
            Accessible.description: qsTr("Reveals a short-lived help balloon")
onLinkActivated: { Qt.openUrlExternally("https://discord.gg/4M2QFmTt2k") }
}
MyBusyIndicator {
View File
@@ -5,7 +5,9 @@ import QtQuick.Controls.Basic
import QtQuick.Dialogs
import QtQuick.Layouts
import Qt.labs.folderlistmodel
import chatlistmodel
import download
import modellist
import network
import llm
@@ -27,7 +29,7 @@ Dialog {
Network.sendSettingsDialog();
}
property var currentChat: LLM.chatListModel.currentChat
property var currentChat: ChatListModel.currentChat
Theme {
id: theme
@@ -47,7 +49,7 @@ Dialog {
property string defaultPromptTemplate: "### Human:
%1
### Assistant:\n"
property string defaultModelPath: Download.defaultLocalModelsPath()
property string defaultModelPath: ModelList.defaultLocalModelsPath()
property string defaultUserDefaultModel: "Application default"
property alias temperature: settings.temperature
@@ -102,20 +104,20 @@ Dialog {
settings.saveChatGPTChats = defaultSaveChatGPTChats
settings.serverChat = defaultServerChat
settings.userDefaultModel = defaultUserDefaultModel
Download.downloadLocalModelsPath = settings.modelPath
ModelList.localModelsPath = settings.modelPath
LLM.threadCount = settings.threadCount
LLM.serverEnabled = settings.serverChat
LLM.chatListModel.shouldSaveChats = settings.saveChats
LLM.chatListModel.shouldSaveChatGPTChats = settings.saveChatGPTChats
ChatListModel.shouldSaveChats = settings.saveChats
        ChatListModel.shouldSaveChatGPTChats = settings.saveChatGPTChats
settings.sync()
}
Component.onCompleted: {
LLM.threadCount = settings.threadCount
LLM.serverEnabled = settings.serverChat
LLM.chatListModel.shouldSaveChats = settings.saveChats
LLM.chatListModel.shouldSaveChatGPTChats = settings.saveChatGPTChats
Download.downloadLocalModelsPath = settings.modelPath
ChatListModel.shouldSaveChats = settings.saveChats
ChatListModel.shouldSaveChatGPTChats = settings.saveChatGPTChats
ModelList.localModelsPath = settings.modelPath
}
Connections {
@@ -515,13 +517,32 @@ Dialog {
Accessible.description: ToolTip.text
}
Label {
id: promptTemplateLabel
text: qsTr("Prompt Template:")
color: theme.textColor
ColumnLayout {
Layout.row: 7
Layout.column: 0
Layout.topMargin: 10
Layout.alignment: Qt.AlignTop
spacing: 20
Label {
id: promptTemplateLabel
text: qsTr("Prompt Template:")
color: theme.textColor
}
Label {
id: promptTemplateLabelHelp
Layout.maximumWidth: promptTemplateLabel.width
visible: settings.promptTemplate.indexOf(
"%1") === -1
color: theme.textErrorColor
text: qsTr("Must contain the string \"%1\" to be replaced with the user's input.")
wrapMode: TextArea.Wrap
Accessible.role: Accessible.EditableText
Accessible.name: text
}
}
Rectangle {
Layout.row: 7
Layout.column: 1
@@ -529,20 +550,6 @@ Dialog {
height: 200
color: "transparent"
clip: true
Label {
id: promptTemplateLabelHelp
visible: settings.promptTemplate.indexOf(
"%1") === -1
font.bold: true
color: theme.textErrorColor
text: qsTr("Prompt template must contain %1 to be replaced with the user's input.")
anchors.fill: templateScrollView
z: 200
padding: 10
wrapMode: TextArea.Wrap
Accessible.role: Accessible.EditableText
Accessible.name: text
}
ScrollView {
id: templateScrollView
anchors.fill: parent
@@ -615,31 +622,21 @@ Dialog {
Layout.row: 1
Layout.column: 1
Layout.minimumWidth: 350
model: modelList
model: ModelList.userDefaultModelList
Accessible.role: Accessible.ComboBox
Accessible.name: qsTr("ComboBox for displaying/picking the default model")
Accessible.description: qsTr("Use this for picking the default model to use; the first item is the current default model")
function updateModel(newModelList) {
var newArray = Array.from(newModelList);
newArray.unshift('Application default');
comboBox.model = newArray;
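                    // The model now comes from ModelList.userDefaultModelList, so only the current index needs syncing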
function updateModel() {
settings.sync();
comboBox.currentIndex = comboBox.indexOfValue(settingsDialog.userDefaultModel);
}
Component.onCompleted: {
comboBox.updateModel(currentChat.modelList)
comboBox.updateModel()
}
Connections {
target: settings
function onUserDefaultModelChanged() {
comboBox.updateModel(currentChat.modelList)
}
}
Connections {
target: currentChat
function onModelListChanged() {
comboBox.updateModel(currentChat.modelList)
comboBox.updateModel()
}
}
onActivated: {
@@ -650,11 +647,11 @@ Dialog {
FolderDialog {
id: modelPathDialog
title: "Please choose a directory"
currentFolder: "file://" + Download.downloadLocalModelsPath
currentFolder: "file://" + ModelList.localModelsPath
onAccepted: {
modelPathDisplayField.text = selectedFolder
Download.downloadLocalModelsPath = modelPathDisplayField.text
settings.modelPath = Download.downloadLocalModelsPath
ModelList.localModelsPath = modelPathDisplayField.text
settings.modelPath = ModelList.localModelsPath
settings.sync()
}
}
@@ -667,7 +664,7 @@ Dialog {
}
MyDirectoryField {
id: modelPathDisplayField
text: Download.downloadLocalModelsPath
text: ModelList.localModelsPath
implicitWidth: 300
Layout.row: 2
Layout.column: 1
@@ -679,11 +676,11 @@ Dialog {
Accessible.description: ToolTip.text
onEditingFinished: {
if (isValid) {
Download.downloadLocalModelsPath = modelPathDisplayField.text
settings.modelPath = Download.downloadLocalModelsPath
ModelList.localModelsPath = modelPathDisplayField.text
settings.modelPath = ModelList.localModelsPath
settings.sync()
} else {
text = Download.downloadLocalModelsPath
text = ModelList.localModelsPath
}
}
}
@@ -741,7 +738,7 @@ Dialog {
onClicked: {
Network.sendSaveChatsToggled(saveChatsBox.checked);
settingsDialog.saveChats = saveChatsBox.checked
LLM.chatListModel.shouldSaveChats = saveChatsBox.checked
ChatListModel.shouldSaveChats = saveChatsBox.checked
settings.sync()
}
ToolTip.text: qsTr("WARNING: Saving chats to disk can be ~2GB per chat")
@@ -761,7 +758,7 @@ Dialog {
checked: settingsDialog.saveChatGPTChats
onClicked: {
settingsDialog.saveChatGPTChats = saveChatGPTChatsBox.checked
LLM.chatListModel.shouldSaveChatGPTChats = saveChatGPTChatsBox.checked
ChatListModel.shouldSaveChatGPTChats = saveChatGPTChatsBox.checked
settings.sync()
}
}
View File
@@ -1,6 +1,7 @@
#include "server.h"
#include "chat.h"
#include "llm.h"
#include "download.h"
#include "modellist.h"
#include <QJsonDocument>
#include <QJsonArray>
@@ -10,26 +11,14 @@
//#define DEBUG
static inline QString modelToName(const ModelInfo &info)
{
QString modelName = info.filename;
if (modelName.startsWith("ggml-"))
modelName = modelName.remove(0, 5);
if (modelName.endsWith(".bin"))
modelName.chop(4);
return modelName;
}
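// Builds the OpenAI-style model object served by the /v1/models endpoints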
static inline QJsonObject modelToJson(const ModelInfo &info)
{
QString modelName = modelToName(info);
QJsonObject model;
model.insert("id", modelName);
model.insert("id", info.name);
model.insert("object", "model");
model.insert("created", "who can keep track?");
model.insert("owned_by", "humanity");
model.insert("root", modelName);
model.insert("root", info.name);
model.insert("parent", QJsonValue::Null);
QJsonArray permissions;
@@ -92,7 +81,7 @@ void Server::start()
if (!LLM::globalInstance()->serverEnabled())
return QHttpServerResponse(QHttpServerResponder::StatusCode::Unauthorized);
const QList<ModelInfo> modelList = Download::globalInstance()->modelList();
const QList<ModelInfo> modelList = ModelList::globalInstance()->exportModelList();
QJsonObject root;
root.insert("object", "list");
QJsonArray data;
@@ -111,14 +100,13 @@ void Server::start()
if (!LLM::globalInstance()->serverEnabled())
return QHttpServerResponse(QHttpServerResponder::StatusCode::Unauthorized);
const QList<ModelInfo> modelList = Download::globalInstance()->modelList();
const QList<ModelInfo> modelList = ModelList::globalInstance()->exportModelList();
QJsonObject object;
for (const ModelInfo &info : modelList) {
if (!info.installed)
continue;
QString modelName = modelToName(info);
if (model == modelName) {
if (model == info.name) {
object = modelToJson(info);
break;
}
@@ -179,14 +167,14 @@ QHttpServerResponse Server::handleCompletionRequest(const QHttpServerRequest &re
messages = body["messages"].toArray();
}
const QString model = body["model"].toString();
bool foundModel = false;
const QList<ModelInfo> modelList = Download::globalInstance()->modelList();
const QString modelRequested = body["model"].toString();
ModelInfo modelInfo = ModelList::globalInstance()->defaultModelInfo();
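    // Start from the default model; the loop below overrides it if the request names an installed model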
const QList<ModelInfo> modelList = ModelList::globalInstance()->exportModelList();
for (const ModelInfo &info : modelList) {
if (!info.installed)
continue;
if (model == modelToName(info)) {
foundModel = true;
if (modelRequested == info.name) {
modelInfo = info;
break;
}
}
@@ -296,13 +284,11 @@ QHttpServerResponse Server::handleCompletionRequest(const QHttpServerRequest &re
// load the new model if necessary
setShouldBeLoaded(true);
if (!foundModel) {
if (!loadDefaultModel()) {
std::cerr << "ERROR: couldn't load default model " << model.toStdString() << std::endl;
return QHttpServerResponse(QHttpServerResponder::StatusCode::BadRequest);
}
} else if (!loadModel(model)) {
std::cerr << "ERROR: couldn't load model " << model.toStdString() << std::endl;
    if (modelInfo.name.isEmpty()) {
std::cerr << "ERROR: couldn't load default model " << modelRequested.toStdString() << std::endl;
return QHttpServerResponse(QHttpServerResponder::StatusCode::BadRequest);
} else if (!loadModel(modelInfo)) {
std::cerr << "ERROR: couldn't load model " << modelInfo.name.toStdString() << std::endl;
return QHttpServerResponse(QHttpServerResponder::StatusCode::InternalServerError);
}
@@ -334,7 +320,7 @@ QHttpServerResponse Server::handleCompletionRequest(const QHttpServerRequest &re
repeat_last_n,
LLM::globalInstance()->threadCount())) {
std::cerr << "ERROR: couldn't prompt model " << model.toStdString() << std::endl;
std::cerr << "ERROR: couldn't prompt model " << modelInfo.name.toStdString() << std::endl;
return QHttpServerResponse(QHttpServerResponder::StatusCode::InternalServerError);
}
QString echoedPrompt = actualPrompt;
@@ -352,7 +338,7 @@ QHttpServerResponse Server::handleCompletionRequest(const QHttpServerRequest &re
responseObject.insert("id", "foobarbaz");
responseObject.insert("object", "text_completion");
responseObject.insert("created", QDateTime::currentSecsSinceEpoch());
responseObject.insert("model", modelName());
responseObject.insert("model", modelInfo.name);
QJsonArray choices;
View File
@@ -1,49 +1,61 @@
#include <QtCore/QCoreApplication>
#include <QDebug>
#include <QFile>
#include <QTextStream>
#include <QRegularExpression>
#ifndef SYSINFO_H
#define SYSINFO_H
#if defined(Q_OS_MAC)
#include <fstream>
#include <string>
#include <sstream>
#include <iomanip>
#if defined(__linux__)
#include <unistd.h>
#elif defined(__APPLE__)
#include <sys/types.h>
#include <sys/sysctl.h>
#elif defined(_WIN32)
#include <windows.h>
#endif
#if defined(Q_OS_WIN)
#include <Windows.h>
#endif
QString getSystemTotalRAM()
static long long getSystemTotalRAMInBytes()
{
qint64 totalRAM = 0;
long long totalRAM = 0;
#if defined(Q_OS_LINUX)
QFile file("/proc/meminfo");
if (file.open(QIODevice::ReadOnly | QIODevice::Text)) {
QTextStream in(&file);
QString line = in.readLine();
while (!line.isNull()) {
if (line.startsWith("MemTotal")) {
static QRegularExpression spaces("\\s+");
QStringList parts = line.split(spaces);
totalRAM = parts[1].toLongLong() * 1024; // Convert from KB to bytes
break;
}
line = in.readLine();
#if defined(__linux__)
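    // MemTotal in /proc/meminfo is reported in kB; parse the number and scale it to bytes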
std::ifstream file("/proc/meminfo");
std::string line;
while (std::getline(file, line)) {
if (line.find("MemTotal") != std::string::npos) {
std::string memTotalStr = line.substr(line.find(":") + 1);
memTotalStr.erase(0, memTotalStr.find_first_not_of(" "));
memTotalStr = memTotalStr.substr(0, memTotalStr.find(" "));
totalRAM = std::stoll(memTotalStr) * 1024; // Convert from KB to bytes
break;
}
file.close();
}
#elif defined(Q_OS_MAC)
file.close();
#elif defined(__APPLE__)
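    // sysctl(HW_MEMSIZE) writes the total physical memory, in bytes, straight into totalRAM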
int mib[2] = {CTL_HW, HW_MEMSIZE};
size_t length = sizeof(totalRAM);
sysctl(mib, 2, &totalRAM, &length, NULL, 0);
#elif defined(Q_OS_WIN)
#elif defined(_WIN32)
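    // GlobalMemoryStatusEx reports total physical RAM in bytes via ullTotalPhys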
MEMORYSTATUSEX memoryStatus;
memoryStatus.dwLength = sizeof(memoryStatus);
GlobalMemoryStatusEx(&memoryStatus);
totalRAM = memoryStatus.ullTotalPhys;
#endif
double totalRAM_GB = static_cast<double>(totalRAM) / (1024 * 1024 * 1024);
return QString::number(totalRAM_GB, 'f', 2) + " GB";
return totalRAM;
}
static double getSystemTotalRAMInGB()
{
return static_cast<double>(getSystemTotalRAMInBytes()) / (1024 * 1024 * 1024);
}
static std::string getSystemTotalRAMInGBString()
{
std::stringstream ss;
ss << std::fixed << std::setprecision(2) << getSystemTotalRAMInGB() << " GB";
return ss.str();
}
#endif // SYSINFO_H