backend: fix #includes with include-what-you-use (#2371)

Also fix a PARENT_SCOPE warning when building the backend.

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Jared Van Bortel 2024-05-31 16:34:54 -04:00 committed by GitHub
parent 8ba7ef4832
commit 636307160e
14 changed files with 87 additions and 50 deletions
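
For reference, a minimal sketch of how include-what-you-use can be hooked into a CMake build such as this one; the commit only records the resulting #include changes, so the exact invocation used here is an assumption. CMake can run the tool alongside every C++ compile via the CMAKE_CXX_INCLUDE_WHAT_YOU_USE variable:

# Hypothetical wiring; not part of this commit.
find_program(IWYU_EXE NAMES include-what-you-use iwyu)
if (IWYU_EXE)
    # Runs IWYU after each compile with the same flags and reports unused or
    # missing #includes; it does not rewrite the sources by itself.
    set(CMAKE_CXX_INCLUDE_WHAT_YOU_USE "${IWYU_EXE}")
endif()

Alternatively, the iwyu_tool.py and fix_includes.py scripts that ship with include-what-you-use can consume the compile_commands.json that this backend already exports via CMAKE_EXPORT_COMPILE_COMMANDS.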

gpt4all-backend/CMakeLists.txt

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.16)
cmake_minimum_required(VERSION 3.21) # for PROJECT_IS_TOP_LEVEL
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
@@ -141,7 +141,7 @@ foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)
prepare_target(gptj llama-mainline)
endif()
if (BUILD_VARIANT STREQUAL cuda)
if (NOT PROJECT_IS_TOP_LEVEL AND BUILD_VARIANT STREQUAL cuda)
set(CUDAToolkit_BIN_DIR ${CUDAToolkit_BIN_DIR} PARENT_SCOPE)
endif()
endforeach()
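
Background on the PARENT_SCOPE change above: set(<var> ... PARENT_SCOPE) triggers a CMake warning ("current scope has no parent") when the directory is not being added from a parent project, which is exactly the standalone-build case. PROJECT_IS_TOP_LEVEL, introduced in CMake 3.21 (hence the bumped cmake_minimum_required), distinguishes the two situations. A stripped-down illustration of the pattern, using a hypothetical variable name:

cmake_minimum_required(VERSION 3.21)
project(scope_demo)

set(MY_TOOL_DIR "/opt/tool/bin")  # hypothetical value

if (NOT PROJECT_IS_TOP_LEVEL)
    # We were pulled in via add_subdirectory(), so a parent scope exists
    # and the variable can safely be propagated to the caller.
    set(MY_TOOL_DIR "${MY_TOOL_DIR}" PARENT_SCOPE)
endif()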

gpt4all-backend/gptj.cpp

@@ -1,33 +1,28 @@
#define GPTJ_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
#include "gptj_impl.h"
#include "utils.h"
#include "llmodel.h"
#include "llmodel_shared.h"
#include "utils.h"
#include <ggml.h>
#include <algorithm>
#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <map>
#include <string>
#include <vector>
#include <ctime>
#include <iostream>
#if defined(_WIN32) && defined(_MSC_VER)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <io.h>
#include <stdio.h>
#else
#include <unistd.h>
#endif
#include <map>
#include <memory>
#include <random>
#include <sstream>
#include <unordered_set>
#include <ggml.h>
#include <stdexcept>
#include <string>
#include <thread>
#include <vector>
namespace {
const char *modelType_ = "GPT-J";

gpt4all-backend/gptj_impl.h

@@ -4,11 +4,12 @@
#ifndef GPTJ_H
#define GPTJ_H
#include <string>
#include <functional>
#include <vector>
#include "llmodel.h"
#include <functional>
#include <string>
#include <vector>
struct GPTJPrivate;
class GPTJ : public LLModel {
public:

gpt4all-backend/llamamodel.cpp

@@ -1,26 +1,33 @@
#define LLAMAMODEL_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
#include "llamamodel_impl.h"
#include "llmodel.h"
#include <ggml.h>
#include <llama.h>
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <functional>
#include <initializer_list>
#include <iomanip>
#include <iostream>
#include <map>
#include <iterator>
#include <memory>
#include <numeric>
#include <random>
#include <optional>
#include <sstream>
#include <stdexcept>
#include <string>
#include <thread>
#include <unordered_set>
#include <vector>
#include <llama.h>
#include <ggml.h>
#ifdef GGML_USE_KOMPUTE
# include <ggml-kompute.h>
#elif GGML_USE_VULKAN
@@ -31,6 +38,7 @@
using namespace std::string_literals;
// Maximum supported GGUF version
static constexpr int GGUF_VER_MAX = 3;

gpt4all-backend/llamamodel_impl.h

@@ -4,11 +4,12 @@
#ifndef LLAMAMODEL_H
#define LLAMAMODEL_H
#include "llmodel.h"
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "llmodel.h"
struct LLamaPrivate;
struct EmbModelSpec;

gpt4all-backend/llmodel.cpp

@@ -1,10 +1,13 @@
#include "llmodel.h"
#include "dlhandle.h"
#include <cassert>
#include <cstdlib>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <iterator>
#include <memory>
#include <optional>
#include <regex>
@@ -13,9 +16,6 @@
#include <unordered_map>
#include <vector>
#include "dlhandle.h"
#include "sysinfo.h"
#ifdef _WIN32
# define WIN32_LEAN_AND_MEAN
# ifndef NOMINMAX
@@ -28,6 +28,10 @@
# include <intrin.h>
#endif
#if defined(__APPLE__) && defined(__aarch64__)
# include "sysinfo.h" // for getSystemTotalRAMInBytes
#endif
namespace fs = std::filesystem;
#ifndef __APPLE__

gpt4all-backend/llmodel.h

@@ -2,14 +2,15 @@
#define LLMODEL_H
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <fstream>
#include <functional>
#include <limits>
#include <optional>
#include <stdexcept>
#include <string>
#include <string_view>
#include <unordered_map>
#include <utility>
#include <vector>
using namespace std::string_literals;

gpt4all-backend/llmodel_c.cpp

@@ -1,12 +1,18 @@
#include "llmodel_c.h"
#include "llmodel.h"
#include <cerrno>
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <exception>
#include <functional>
#include <iostream>
#include <memory>
#include <optional>
#include <utility>
#include <string>
#include <vector>
struct LLModelWrapper {
LLModel *llModel = nullptr;

gpt4all-backend/llmodel_c.h

@@ -1,9 +1,9 @@
#ifndef LLMODEL_C_H
#define LLMODEL_C_H
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#ifdef __GNUC__
#define DEPRECATED __attribute__ ((deprecated))

gpt4all-backend/llmodel_shared.cpp

@@ -1,10 +1,17 @@
#include "llmodel.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <optional>
#include <regex>
#include <stdexcept>
#include <string>
#include <unordered_set>
#include <vector>
// TODO(cebtenzzre): replace this with llama_kv_cache_seq_shift for llamamodel (GPT-J needs this as-is)
void LLModel::recalculateContext(PromptContext &promptCtx, std::function<bool(bool)> recalculate) {

gpt4all-backend/llmodel_shared.h

@@ -1,9 +1,11 @@
#pragma once
#include <cstdint>
#include <cstddef>
#include <vector>
#include <ggml.h>
#include <cstddef>
#include <cstdint>
#include <vector>
struct llm_buffer {
uint8_t * addr = NULL;
size_t size = 0;

gpt4all-backend/sysinfo.h

@@ -2,17 +2,21 @@
#define SYSINFO_H
#include <fstream>
#include <string>
#include <sstream>
#include <iomanip>
#include <sstream>
#include <string>
#if defined(__linux__)
#include <unistd.h>
# include <unistd.h>
#elif defined(__APPLE__)
#include <sys/types.h>
#include <sys/sysctl.h>
# include <sys/types.h>
# include <sys/sysctl.h>
#elif defined(_WIN32)
#include <windows.h>
# define WIN32_LEAN_AND_MEAN
# ifndef NOMINMAX
# define NOMINMAX
# endif
# include <windows.h>
#endif
static long long getSystemTotalRAMInBytes()

gpt4all-backend/utils.cpp

@@ -1,7 +1,12 @@
#include "utils.h"
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iterator>
#include <regex>
#include <utility>
void replace(std::string & str, const std::string & needle, const std::string & replacement) {
size_t pos = 0;
@@ -325,4 +330,4 @@ gpt_vocab::id gpt_sample_top_k_top_p(
int idx = dist(rng);
return logits_id[idx].second;
}
}

gpt4all-backend/utils.h

@@ -2,11 +2,14 @@
#pragma once
#include <string>
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <map>
#include <vector>
#include <random>
#include <string>
#include <thread>
#include <vector>
//
// General purpose inline functions