From 53f109f519f695664ba2536ca1dc1b34305d64a4 Mon Sep 17 00:00:00 2001
From: Jared Van Bortel
Date: Thu, 14 Mar 2024 12:06:07 -0400
Subject: [PATCH] llamamodel: fix macOS build (#2125)

Signed-off-by: Jared Van Bortel
---
 gpt4all-backend/llamamodel.cpp | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/gpt4all-backend/llamamodel.cpp b/gpt4all-backend/llamamodel.cpp
index 966bf90f..e16de1cb 100644
--- a/gpt4all-backend/llamamodel.cpp
+++ b/gpt4all-backend/llamamodel.cpp
@@ -791,14 +791,16 @@ void LLamaModel::embedInternal(
     }
 
     // split into max_len-sized chunks
-    struct split_batch { int idx; TokenString batch; };
+    struct split_batch { unsigned idx; TokenString batch; };
     std::vector<split_batch> batches;
     for (unsigned i = 0; i < inputs.size(); i++) {
         auto &input = inputs[i];
         for (auto it = input.begin(); it < input.end(); it += max_len) {
             if (it > input.begin()) { it -= chunkOverlap; }
             auto end = std::min(it + max_len, input.end());
-            auto &batch = batches.emplace_back(i, prefixTokens).batch;
+            batches.push_back({ i, {} });
+            auto &batch = batches.back().batch;
+            batch = prefixTokens;
             batch.insert(batch.end(), it, end);
             batch.push_back(eos_token);
             if (!doMean) { break; /* limit text to one chunk */ }
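
Why the one-liner failed on macOS: emplace_back constructs the element in
place by forwarding its arguments to the element type's constructor, i.e.
split_batch(i, prefixTokens). split_batch is an aggregate with no
user-declared constructor, and parenthesized initialization of aggregates is
a C++20 feature (P0960) that the Apple Clang building this target did not yet
implement. The patch switches to push_back with a braced initializer, which
is ordinary aggregate initialization and portable; and because braced
initialization rejects narrowing conversions, idx is also widened from int to
unsigned to match the unsigned loop counter i. Below is a minimal standalone
sketch of the same pattern, not taken from the patch; the names
split_batch_demo and prefix are hypothetical.

    #include <vector>

    // Aggregate with no user-declared constructor, like split_batch.
    struct split_batch_demo { unsigned idx; std::vector<int> batch; };

    int main()
    {
        std::vector<split_batch_demo> batches;
        std::vector<int> prefix { 1, 2, 3 };

        // Rejected before C++20 P0960: emplace_back forwards its arguments
        // to split_batch_demo(0u, prefix), which is parenthesized aggregate
        // initialization and unavailable on older compilers.
        //batches.emplace_back(0u, prefix);

        // Portable form used by the patch: brace-initialize via push_back,
        // then fill the member through a reference to the stored element.
        batches.push_back({ 0u, {} });
        auto &batch = batches.back().batch;
        batch = prefix;
        batch.push_back(4); // stand-in for appending chunk tokens + eos
        return 0;
    }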