Don't start recalculating context immediately upon switching to a new chat

Instead, wait until the first prompt. This allows users to switch between
chats quickly and to delete chats more easily.

Fixes issue #1545
Adam Treat 2023-10-28 16:39:25 -04:00
parent 7bcd9e8089
commit dc2e7d6e9b

@@ -503,6 +503,11 @@ bool ChatLLM::handleRecalculate(bool isRecalc)
 }
 bool ChatLLM::prompt(const QList<QString> &collectionList, const QString &prompt)
 {
+    if (m_restoreStateFromText) {
+        Q_ASSERT(m_state.isEmpty());
+        processRestoreStateFromText();
+    }
+
     if (!m_processedSystemPrompt)
         processSystemPrompt();
     const QString promptTemplate = MySettings::globalInstance()->modelPromptTemplate(m_modelInfo);
@@ -906,11 +911,6 @@ void ChatLLM::restoreState()
         return;
     }
 
-    if (m_restoreStateFromText) {
-        Q_ASSERT(m_state.isEmpty());
-        processRestoreStateFromText();
-    }
-
 #if defined(DEBUG)
    qDebug() << "restoreState" << m_llmThread.objectName() << "size:" << m_state.size();
 #endif
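
For reference, a minimal standalone sketch of the lazy-restore pattern this change adopts (plain C++, no Qt; ChatSession, switchIn, and the member names below are illustrative stand-ins, not the actual ChatLLM API): switching a chat in only records that a restore is pending, and the expensive context recalculation runs on the first prompt.

#include <iostream>
#include <string>

// Simplified sketch: state restoration is deferred until the first prompt,
// so switching to (or deleting) a chat never triggers the expensive work.
class ChatSession {
public:
    // Called when the user switches to this chat: only records that a
    // restore is pending instead of rebuilding the context immediately.
    void switchIn(const std::string &savedText) {
        m_savedText = savedText;
        m_restorePending = true;
    }

    std::string prompt(const std::string &userInput) {
        // Perform the expensive restore lazily, on the first prompt only.
        if (m_restorePending) {
            restoreFromText();
            m_restorePending = false;
        }
        return "response to: " + userInput;
    }

private:
    void restoreFromText() {
        // Stand-in for recalculating the model context from saved chat text.
        std::cout << "restoring context from " << m_savedText.size()
                  << " bytes of saved chat\n";
    }

    std::string m_savedText;
    bool m_restorePending = false;
};

int main() {
    ChatSession chat;
    chat.switchIn("previous conversation text");
    // Switching in was cheap; the restore happens here, on first use.
    std::cout << chat.prompt("hello") << '\n';
}

The same idea drives the diff above: the restore-from-text block moves out of restoreState(), which runs when a chat is switched in, and into prompt(), so deleting or flipping through chats stays cheap.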