Mirror of https://github.com/nomic-ai/gpt4all.git (synced 2024-11-10 11:49:05 +03:00)
Load models from filepath only.
This commit is contained in:
    parent ca2af100cd
    commit 69f92d8ea8
llm.cpp
@@ -91,14 +91,14 @@ bool LLMObject::loadModelPrivate(const QString &modelName)
     if (info.exists()) {
 
         auto fin = std::ifstream(filePath.toStdString(), std::ios::binary);
         uint32_t magic;
         fin.read((char *) &magic, sizeof(magic));
-        fin.seekg(0);
+        fin.close();
         isGPTJ = magic == 0x67676d6c;
         if (isGPTJ) {
             m_llmodel = new GPTJ;
-            m_llmodel->loadModel(modelName.toStdString(), fin);
+            m_llmodel->loadModel(filePath.toStdString());
         } else {
             m_llmodel = new LLamaModel;
             m_llmodel->loadModel(filePath.toStdString());
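Note: the dispatch in this hunk keys on the first four bytes of the model file, the ggml magic 0x67676d6c. Since the stream is no longer handed to loadModel(), it can be closed as soon as the magic is read. A minimal standalone sketch of that check (the file name and the program around it are illustrative, not part of the commit):

    #include <cstdint>
    #include <fstream>
    #include <iostream>

    int main() {
        // Hypothetical model path; the commit uses the resolved filePath.
        auto fin = std::ifstream("model.bin", std::ios::binary);
        std::uint32_t magic = 0;
        fin.read(reinterpret_cast<char *>(&magic), sizeof(magic));
        fin.close(); // only the path, not the stream, reaches loadModel() now
        const bool isGPTJ = (magic == 0x67676d6c); // ggml magic
        std::cout << (isGPTJ ? "GPT-J (ggml magic)" : "LLaMA") << '\n';
    }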
gptj.cpp
@@ -645,16 +645,12 @@ GPTJ::GPTJ()
     d_ptr->modelLoaded = false;
 }
 
-bool GPTJ::loadModel(const std::string &modelPath)
-{
-    std::cerr << "GPTJ ERROR: loading gpt model from file unsupported!\n";
-    return false;
-}
-
-bool GPTJ::loadModel(const std::string &modelPath, std::istream &fin) {
+bool GPTJ::loadModel(const std::string &modelPath) {
     std::mt19937 rng(time(NULL));
     d_ptr->rng = rng;
 
+    auto fin = std::ifstream(modelPath, std::ios::binary);
+
     // load the model
     if (!gptj_model_load(modelPath, fin, d_ptr->model, d_ptr->vocab)) {
         std::cerr << "GPT-J ERROR: failed to load model from " << modelPath;
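Note: the new signature moves stream ownership into the loader itself; the caller supplies a path and the std::ifstream becomes a local whose lifetime ends with the function. A self-contained sketch of the pattern, with loadWeights as a hypothetical stand-in for gptj_model_load:

    #include <fstream>
    #include <iostream>
    #include <string>

    // Hypothetical stand-in for gptj_model_load: consumes an open stream.
    static bool loadWeights(const std::string &path, std::istream &fin) {
        return fin.good(); // real code parses the ggml file here
    }

    // Shape of the new single-argument loader: the path comes in and the
    // stream is opened locally instead of being passed by the caller.
    bool loadModel(const std::string &modelPath) {
        auto fin = std::ifstream(modelPath, std::ios::binary);
        if (!loadWeights(modelPath, fin)) {
            std::cerr << "ERROR: failed to load model from " << modelPath << "\n";
            return false;
        }
        return true;
    }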
gptj.h
@@ -13,7 +13,6 @@ public:
     ~GPTJ();
 
     bool loadModel(const std::string &modelPath) override;
-    bool loadModel(const std::string &modelPath, std::istream &fin) override;
     bool isModelLoaded() const override;
     void prompt(const std::string &prompt,
         std::function<bool(int32_t)> promptCallback,
llamamodel.cpp
@@ -31,12 +31,6 @@ LLamaModel::LLamaModel()
     d_ptr->modelLoaded = false;
 }
 
-bool LLamaModel::loadModel(const std::string &modelPath, std::istream &fin)
-{
-    std::cerr << "LLAMA ERROR: loading llama model from stream unsupported!\n";
-    return false;
-}
-
 bool LLamaModel::loadModel(const std::string &modelPath)
 {
     // load the model
llamamodel.h
@@ -13,7 +13,6 @@ public:
     ~LLamaModel();
 
     bool loadModel(const std::string &modelPath) override;
-    bool loadModel(const std::string &modelPath, std::istream &fin) override;
     bool isModelLoaded() const override;
     void prompt(const std::string &prompt,
         std::function<bool(int32_t)> promptCallback,
llmodel.h
@@ -11,7 +11,6 @@ public:
     virtual ~LLModel() {}
 
     virtual bool loadModel(const std::string &modelPath) = 0;
-    virtual bool loadModel(const std::string &modelPath, std::istream &fin) = 0;
     virtual bool isModelLoaded() const = 0;
     struct PromptContext {
         std::vector<float> logits; // logits of current context
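Note: taken together, the header changes leave the LLModel interface with a single loading entry point. A sketch of the resulting contract under those assumptions; DummyModel and main are illustrative only, not part of the commit:

    #include <iostream>
    #include <string>

    // Reduction of the LLModel contract after this commit: one
    // pure-virtual loadModel that takes only a file path.
    class LLModel {
    public:
        virtual ~LLModel() {}
        virtual bool loadModel(const std::string &modelPath) = 0;
        virtual bool isModelLoaded() const = 0;
    };

    // Hypothetical backend; real ones (GPTJ, LLamaModel) open the
    // file themselves inside loadModel, as the diffs above show.
    class DummyModel : public LLModel {
    public:
        bool loadModel(const std::string &modelPath) override {
            m_loaded = !modelPath.empty();
            return m_loaded;
        }
        bool isModelLoaded() const override { return m_loaded; }
    private:
        bool m_loaded = false;
    };

    int main() {
        DummyModel model;
        model.loadModel("model.bin"); // hypothetical path
        std::cout << std::boolalpha << model.isModelLoaded() << '\n';
    }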