diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 62928bf3..45d460e2 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -97,7 +97,9 @@ jobs: command: | wget -qO- https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo tee /etc/apt/trusted.gpg.d/lunarg.asc sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list http://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list - sudo apt update && sudo apt install -y libfontconfig1 libfreetype6 libx11-6 libx11-xcb1 libxext6 libxfixes3 libxi6 libxrender1 libxcb1 libxcb-cursor0 libxcb-glx0 libxcb-keysyms1 libxcb-image0 libxcb-shm0 libxcb-icccm4 libxcb-sync1 libxcb-xfixes0 libxcb-shape0 libxcb-randr0 libxcb-render-util0 libxcb-util1 libxcb-xinerama0 libxcb-xkb1 libxkbcommon0 libxkbcommon-x11-0 bison build-essential flex gperf python3 gcc g++ libgl1-mesa-dev libwayland-dev vulkan-sdk patchelf + wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb + sudo dpkg -i cuda-keyring_1.1-1_all.deb + sudo apt update && sudo apt install -y libfontconfig1 libfreetype6 libx11-6 libx11-xcb1 libxext6 libxfixes3 libxi6 libxrender1 libxcb1 libxcb-cursor0 libxcb-glx0 libxcb-keysyms1 libxcb-image0 libxcb-shm0 libxcb-icccm4 libxcb-sync1 libxcb-xfixes0 libxcb-shape0 libxcb-randr0 libxcb-render-util0 libxcb-util1 libxcb-xinerama0 libxcb-xkb1 libxkbcommon0 libxkbcommon-x11-0 bison build-essential flex gperf python3 gcc g++ libgl1-mesa-dev libwayland-dev vulkan-sdk patchelf cuda-compiler-12-4 libcublas-dev-12-4 libnvidia-compute-550-server libmysqlclient21 libodbc2 libpq5 - run: name: Installing Qt command: | @@ -121,6 +123,7 @@ jobs: set -eo pipefail export CMAKE_PREFIX_PATH=~/Qt/6.5.1/gcc_64/lib/cmake export PATH=$PATH:$HOME/Qt/Tools/QtInstallerFramework/4.7/bin + export PATH=$PATH:/usr/local/cuda/bin mkdir build cd build mkdir upload @@ -162,6 +165,11 @@ jobs: command: | Invoke-WebRequest -Uri https://sdk.lunarg.com/sdk/download/1.3.261.1/windows/VulkanSDK-1.3.261.1-Installer.exe -OutFile VulkanSDK-1.3.261.1-Installer.exe .\VulkanSDK-1.3.261.1-Installer.exe --accept-licenses --default-answer --confirm-command install + - run: + name: Install CUDA Toolkit + command: | + Invoke-WebRequest -Uri https://developer.download.nvidia.com/compute/cuda/12.4.1/network_installers/cuda_12.4.1_windows_network.exe -OutFile cuda_12.4.1_windows_network.exe + .\cuda_12.4.1_windows_network.exe -s cudart_12.4 nvcc_12.4 cublas_12.4 cublas_dev_12.4 - run: name: Build command: | @@ -218,7 +226,9 @@ jobs: command: | wget -qO- https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo tee /etc/apt/trusted.gpg.d/lunarg.asc sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list http://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list - sudo apt update && sudo apt install -y libfontconfig1 libfreetype6 libx11-6 libx11-xcb1 libxext6 libxfixes3 libxi6 libxrender1 libxcb1 libxcb-cursor0 libxcb-glx0 libxcb-keysyms1 libxcb-image0 libxcb-shm0 libxcb-icccm4 libxcb-sync1 libxcb-xfixes0 libxcb-shape0 libxcb-randr0 libxcb-render-util0 libxcb-util1 libxcb-xinerama0 libxcb-xkb1 libxkbcommon0 libxkbcommon-x11-0 bison build-essential flex gperf python3 gcc g++ libgl1-mesa-dev libwayland-dev vulkan-sdk + wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb + sudo dpkg -i cuda-keyring_1.1-1_all.deb + sudo apt update && sudo apt install -y libfontconfig1 libfreetype6 libx11-6 libx11-xcb1 libxext6 libxfixes3 libxi6 libxrender1 
libxcb1 libxcb-cursor0 libxcb-glx0 libxcb-keysyms1 libxcb-image0 libxcb-shm0 libxcb-icccm4 libxcb-sync1 libxcb-xfixes0 libxcb-shape0 libxcb-randr0 libxcb-render-util0 libxcb-util1 libxcb-xinerama0 libxcb-xkb1 libxkbcommon0 libxkbcommon-x11-0 bison build-essential flex gperf python3 gcc g++ libgl1-mesa-dev libwayland-dev vulkan-sdk cuda-compiler-12-4 libcublas-dev-12-4 libnvidia-compute-550-server libmysqlclient21 libodbc2 libpq5 - run: name: Installing Qt command: | @@ -235,6 +245,7 @@ jobs: name: Build command: | export CMAKE_PREFIX_PATH=~/Qt/6.5.1/gcc_64/lib/cmake + export PATH=$PATH:/usr/local/cuda/bin ~/Qt/Tools/CMake/bin/cmake -DCMAKE_BUILD_TYPE=Release -S gpt4all-chat -B build ~/Qt/Tools/CMake/bin/cmake --build build --target all @@ -269,6 +280,11 @@ jobs: command: | Invoke-WebRequest -Uri https://sdk.lunarg.com/sdk/download/1.3.261.1/windows/VulkanSDK-1.3.261.1-Installer.exe -OutFile VulkanSDK-1.3.261.1-Installer.exe .\VulkanSDK-1.3.261.1-Installer.exe --accept-licenses --default-answer --confirm-command install + - run: + name: Install CUDA Toolkit + command: | + Invoke-WebRequest -Uri https://developer.download.nvidia.com/compute/cuda/12.4.1/network_installers/cuda_12.4.1_windows_network.exe -OutFile cuda_12.4.1_windows_network.exe + .\cuda_12.4.1_windows_network.exe -s cudart_12.4 nvcc_12.4 cublas_12.4 cublas_dev_12.4 - run: name: Build command: | @@ -394,12 +410,15 @@ jobs: command: | wget -qO- https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo tee /etc/apt/trusted.gpg.d/lunarg.asc sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list http://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list + wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb + sudo dpkg -i cuda-keyring_1.1-1_all.deb sudo apt-get update - sudo apt-get install -y cmake build-essential vulkan-sdk + sudo apt-get install -y cmake build-essential vulkan-sdk cuda-compiler-12-4 libcublas-dev-12-4 libnvidia-compute-550-server libmysqlclient21 libodbc2 libpq5 pip install setuptools wheel cmake - run: name: Build C library command: | + export PATH=$PATH:/usr/local/cuda/bin git submodule update --init --recursive cd gpt4all-backend cmake -B build @@ -459,6 +478,11 @@ jobs: command: | Invoke-WebRequest -Uri https://sdk.lunarg.com/sdk/download/1.3.261.1/windows/VulkanSDK-1.3.261.1-Installer.exe -OutFile VulkanSDK-1.3.261.1-Installer.exe .\VulkanSDK-1.3.261.1-Installer.exe --accept-licenses --default-answer --confirm-command install + - run: + name: Install CUDA Toolkit + command: | + Invoke-WebRequest -Uri https://developer.download.nvidia.com/compute/cuda/12.4.1/network_installers/cuda_12.4.1_windows_network.exe -OutFile cuda_12.4.1_windows_network.exe + .\cuda_12.4.1_windows_network.exe -s cudart_12.4 nvcc_12.4 cublas_12.4 cublas_dev_12.4 - run: name: Install dependencies command: @@ -530,11 +554,14 @@ jobs: command: | wget -qO- https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo tee /etc/apt/trusted.gpg.d/lunarg.asc sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list http://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list + wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb + sudo dpkg -i cuda-keyring_1.1-1_all.deb sudo apt-get update - sudo apt-get install -y cmake build-essential vulkan-sdk + sudo apt-get install -y cmake build-essential vulkan-sdk cuda-compiler-12-4 libcublas-dev-12-4 libnvidia-compute-550-server libmysqlclient21 libodbc2 libpq5 - run: name: Build 
Libraries command: | + export PATH=$PATH:/usr/local/cuda/bin cd gpt4all-backend mkdir -p runtimes/build cd runtimes/build @@ -599,6 +626,11 @@ jobs: command: | Invoke-WebRequest -Uri https://sdk.lunarg.com/sdk/download/1.3.261.1/windows/VulkanSDK-1.3.261.1-Installer.exe -OutFile VulkanSDK-1.3.261.1-Installer.exe .\VulkanSDK-1.3.261.1-Installer.exe --accept-licenses --default-answer --confirm-command install + - run: + name: Install CUDA Toolkit + command: | + Invoke-WebRequest -Uri https://developer.download.nvidia.com/compute/cuda/12.4.1/network_installers/cuda_12.4.1_windows_network.exe -OutFile cuda_12.4.1_windows_network.exe + .\cuda_12.4.1_windows_network.exe -s cudart_12.4 nvcc_12.4 cublas_12.4 cublas_dev_12.4 - run: name: Install dependencies command: | @@ -642,6 +674,11 @@ jobs: command: | Invoke-WebRequest -Uri https://sdk.lunarg.com/sdk/download/1.3.261.1/windows/VulkanSDK-1.3.261.1-Installer.exe -OutFile VulkanSDK-1.3.261.1-Installer.exe .\VulkanSDK-1.3.261.1-Installer.exe --accept-licenses --default-answer --confirm-command install + - run: + name: Install CUDA Toolkit + command: | + Invoke-WebRequest -Uri https://developer.download.nvidia.com/compute/cuda/12.4.1/network_installers/cuda_12.4.1_windows_network.exe -OutFile cuda_12.4.1_windows_network.exe + .\cuda_12.4.1_windows_network.exe -s cudart_12.4 nvcc_12.4 cublas_12.4 cublas_dev_12.4 - run: name: Install dependencies command: | diff --git a/LICENSE_SOM.txt b/LICENSE_SOM.txt deleted file mode 100644 index eb912c0f..00000000 --- a/LICENSE_SOM.txt +++ /dev/null @@ -1,30 +0,0 @@ -Software for Open Models License (SOM) -Version 1.0 dated August 30th, 2023 - -This license governs use of the accompanying Software. If you use the Software, you accept this license. If you do not accept the license, do not use the Software. - -This license is intended to encourage open release of models created, modified, processed, or otherwise used via the Software under open licensing terms, and should be interpreted in light of that intent. - -1. Definitions -The “Licensor” is the person or entity who is making the Software available under this license. “Software” is the software made available by Licensor under this license. -A “Model” is the output of a machine learning algorithm, and excludes the Software. -“Model Source Materials” must include the Model and model weights, and may include any input data, input data descriptions, documentation or training descriptions for the Model. -“Open Licensing Terms” means: (a) any open source license approved by the Open Source Initiative, or (b) any other terms that make the Model Source Materials publicly available free of charge, and allow recipients to use, modify and distribute the Model Source Materials. Terms described in (b) may include reasonable restrictions such as non-commercial or non-production limitations, or require use in compliance with law. - -2. Grant of Rights. Subject to the conditions and limitations in section 3: -(A) Copyright Grant. Licensor grants you a non-exclusive, worldwide, royalty-free copyright license to copy, modify, and distribute the Software and any modifications of the Software you create under this license. The foregoing license includes without limitation the right to create, modify, and use Models using this Software. - -(B) Patent Grant. Licensor grants you a non-exclusive, worldwide, royalty-free license, under any patents owned or controlled by Licensor, to make, have made, use, sell, offer for sale, import, or otherwise exploit the Software. 
No license is granted to patent rights that are not embodied in the operation of the Software in the form provided by Licensor. - -3. Conditions and Limitations -(A) Model Licensing and Access. If you use the Software to create, modify, process, or otherwise use any Model, including usage to create inferences with a Model, whether or not you make the Model available to others, you must make that Model Source Materials publicly available under Open Licensing Terms. - -(B) No Re-Licensing. If you redistribute the Software, or modifications to the Software made under the license granted above, you must make it available only under the terms of this license. You may offer additional terms such as warranties, maintenance and support, but You, and not Licensor, are responsible for performing such terms. - -(C) No Trademark License. This license does not grant you rights to use the Licensor’s name, logo, or trademarks. - -(D) If you assert in writing a claim against any person or entity alleging that the use of the Software infringes any patent, all of your licenses to the Software under Section 2 end automatically as of the date you asserted the claim. - -(E) If you distribute any portion of the Software, you must retain all copyright, patent, trademark, and attribution notices that are present in the Software, and you must include a copy of this license. - -(F) The Software is licensed “as-is.” You bear the entire risk of using it. Licensor gives You no express warranties, guarantees or conditions. You may have additional consumer rights under your local laws that this license cannot change. To the extent permitted under your local laws, the Licensor disclaims and excludes the implied warranties of merchantability, fitness for a particular purpose and non-infringement. To the extent this disclaimer is unlawful, you, and not Licensor, are responsible for any liability. diff --git a/gpt4all-backend/CMakeLists.txt b/gpt4all-backend/CMakeLists.txt index 27691d25..dd69e1b7 100644 --- a/gpt4all-backend/CMakeLists.txt +++ b/gpt4all-backend/CMakeLists.txt @@ -2,15 +2,23 @@ cmake_minimum_required(VERSION 3.16) set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON) set(CMAKE_EXPORT_COMPILE_COMMANDS ON) -if(APPLE) - option(BUILD_UNIVERSAL "Build a Universal binary on macOS" ON) - if(BUILD_UNIVERSAL) +if (APPLE) + option(BUILD_UNIVERSAL "Build a Universal binary on macOS" ON) +else() + option(LLMODEL_KOMPUTE "llmodel: use Kompute" ON) + option(LLMODEL_VULKAN "llmodel: use Vulkan" OFF) + option(LLMODEL_CUDA "llmodel: use CUDA" ON) + option(LLMODEL_ROCM "llmodel: use ROCm" OFF) +endif() + +if (APPLE) + if (BUILD_UNIVERSAL) # Build a Universal binary on macOS # This requires that the found Qt library is compiled as Universal binaries. 
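The LLMODEL_* options introduced above are what select which accelerated backend variants get built. A minimal local configure sketch (the paths, build directory and --parallel flag are illustrative, not part of this patch; only -DLLMODEL_CUDA=OFF is taken from the warning message further down):

    # make nvcc visible to CMake, as the CI jobs above do
    export PATH="$PATH:/usr/local/cuda/bin"
    # default non-Apple configuration: Kompute + CUDA variants
    cmake -S gpt4all-backend -B build -DCMAKE_BUILD_TYPE=Release
    # or skip CUDA entirely if no toolkit is installed
    cmake -S gpt4all-backend -B build -DCMAKE_BUILD_TYPE=Release -DLLMODEL_CUDA=OFF
    cmake --build build --parallel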
set(CMAKE_OSX_ARCHITECTURES "arm64;x86_64" CACHE STRING "" FORCE) else() # Build for the host architecture on macOS - if(NOT CMAKE_OSX_ARCHITECTURES) + if (NOT CMAKE_OSX_ARCHITECTURES) set(CMAKE_OSX_ARCHITECTURES "${CMAKE_HOST_SYSTEM_PROCESSOR}" CACHE STRING "" FORCE) endif() endif() @@ -39,11 +47,35 @@ else() message(STATUS "Interprocedural optimization support detected") endif() +set(DIRECTORY llama.cpp-mainline) include(llama.cpp.cmake) -set(BUILD_VARIANTS default avxonly) -if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") - set(BUILD_VARIANTS ${BUILD_VARIANTS} metal) +set(BUILD_VARIANTS) +set(GPTJ_BUILD_VARIANT cpu) +if (APPLE) + list(APPEND BUILD_VARIANTS metal) +endif() +if (LLMODEL_KOMPUTE) + list(APPEND BUILD_VARIANTS kompute kompute-avxonly) + set(GPTJ_BUILD_VARIANT kompute) +else() + list(PREPEND BUILD_VARIANTS cpu cpu-avxonly) +endif() +if (LLMODEL_VULKAN) + list(APPEND BUILD_VARIANTS vulkan vulkan-avxonly) +endif() +if (LLMODEL_CUDA) + include(CheckLanguage) + check_language(CUDA) + if (NOT CMAKE_CUDA_COMPILER) + message(WARNING "CUDA Toolkit not found. To build without CUDA, use -DLLMODEL_CUDA=OFF.") + endif() + enable_language(CUDA) + list(APPEND BUILD_VARIANTS cuda cuda-avxonly) +endif() +if (LLMODEL_ROCM) + enable_language(HIP) + list(APPEND BUILD_VARIANTS rocm rocm-avxonly) endif() set(CMAKE_VERBOSE_MAKEFILE ON) @@ -51,24 +83,34 @@ set(CMAKE_VERBOSE_MAKEFILE ON) # Go through each build variant foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS) # Determine flags - if (BUILD_VARIANT STREQUAL avxonly) - set(GPT4ALL_ALLOW_NON_AVX NO) + if (BUILD_VARIANT MATCHES avxonly) + set(GPT4ALL_ALLOW_NON_AVX OFF) else() - set(GPT4ALL_ALLOW_NON_AVX YES) + set(GPT4ALL_ALLOW_NON_AVX ON) endif() set(LLAMA_AVX2 ${GPT4ALL_ALLOW_NON_AVX}) set(LLAMA_F16C ${GPT4ALL_ALLOW_NON_AVX}) set(LLAMA_FMA ${GPT4ALL_ALLOW_NON_AVX}) - if (BUILD_VARIANT STREQUAL metal) - set(LLAMA_METAL YES) - else() - set(LLAMA_METAL NO) + set(LLAMA_METAL OFF) + set(LLAMA_KOMPUTE OFF) + set(LLAMA_VULKAN OFF) + set(LLAMA_CUDA OFF) + set(LLAMA_ROCM OFF) + if (BUILD_VARIANT MATCHES metal) + set(LLAMA_METAL ON) + elseif (BUILD_VARIANT MATCHES kompute) + set(LLAMA_KOMPUTE ON) + elseif (BUILD_VARIANT MATCHES vulkan) + set(LLAMA_VULKAN ON) + elseif (BUILD_VARIANT MATCHES cuda) + set(LLAMA_CUDA ON) + elseif (BUILD_VARIANT MATCHES rocm) + set(LLAMA_HIPBLAS ON) endif() # Include GGML - set(LLAMA_K_QUANTS YES) - include_ggml(llama.cpp-mainline -mainline-${BUILD_VARIANT} ON) + include_ggml(-mainline-${BUILD_VARIANT}) # Function for preparing individual implementations function(prepare_target TARGET_NAME BASE_LIB) @@ -93,11 +135,15 @@ foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS) LLAMA_VERSIONS=>=3 LLAMA_DATE=999999) prepare_target(llamamodel-mainline llama-mainline) - if (NOT LLAMA_METAL) + if (BUILD_VARIANT MATCHES ${GPTJ_BUILD_VARIANT}) add_library(gptj-${BUILD_VARIANT} SHARED gptj.cpp utils.h utils.cpp llmodel_shared.cpp llmodel_shared.h) prepare_target(gptj llama-mainline) endif() + + if (BUILD_VARIANT STREQUAL cuda) + set(CUDAToolkit_BIN_DIR ${CUDAToolkit_BIN_DIR} PARENT_SCOPE) + endif() endforeach() add_library(llmodel diff --git a/gpt4all-backend/llama.cpp-mainline b/gpt4all-backend/llama.cpp-mainline index a3f03b7e..40bac11e 160000 --- a/gpt4all-backend/llama.cpp-mainline +++ b/gpt4all-backend/llama.cpp-mainline @@ -1 +1 @@ -Subproject commit a3f03b7e793ee611c4918235d4532ee535a9530d +Subproject commit 40bac11e427f2307305b86c322cb366bb95fcb8a diff --git a/gpt4all-backend/llama.cpp.cmake b/gpt4all-backend/llama.cpp.cmake index 
0bb79313..4bb3d283 100644 --- a/gpt4all-backend/llama.cpp.cmake +++ b/gpt4all-backend/llama.cpp.cmake @@ -1,69 +1,25 @@ -# -# Copyright (c) 2023 Nomic, Inc. All rights reserved. -# -# This software is licensed under the terms of the Software for Open Models License (SOM), -# version 1.0, as detailed in the LICENSE_SOM.txt file. A copy of this license should accompany -# this software. Except as expressly granted in the SOM license, all rights are reserved by Nomic, Inc. -# - -cmake_minimum_required(VERSION 3.12) # Don't bump this version for no reason - -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE) - set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE) - set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo") -endif() +cmake_minimum_required(VERSION 3.14) # for add_link_options and implicit target directories. set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) -if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) - set(LLAMA_STANDALONE ON) - - # configure project version - # TODO -else() - set(LLAMA_STANDALONE OFF) -endif() - -if (EMSCRIPTEN) - set(BUILD_SHARED_LIBS_DEFAULT OFF) - - option(LLAMA_WASM_SINGLE_FILE "llama: embed WASM inside the generated llama.js" ON) -else() - if (MINGW) - set(BUILD_SHARED_LIBS_DEFAULT OFF) - else() - set(BUILD_SHARED_LIBS_DEFAULT ON) - endif() -endif() - -if (APPLE) - set(LLAMA_KOMPUTE_DEFAULT OFF) -else() - set(LLAMA_KOMPUTE_DEFAULT ON) -endif() - - # # Option list # # some of the options here are commented out so they can be set "dynamically" before calling include_ggml() +set(LLAMA_LLAMAFILE_DEFAULT ON) + # general -option(LLAMA_STATIC "llama: static link libraries" OFF) -option(LLAMA_NATIVE "llama: enable -march=native flag" OFF) -option(LLAMA_LTO "llama: enable link time optimization" OFF) +option(LLAMA_STATIC "llama: static link libraries" OFF) +option(LLAMA_NATIVE "llama: enable -march=native flag" OFF) # debug -option(LLAMA_ALL_WARNINGS "llama: enable all compiler warnings" ON) -option(LLAMA_ALL_WARNINGS_3RD_PARTY "llama: enable all compiler warnings in 3rd party libs" OFF) -option(LLAMA_GPROF "llama: enable gprof" OFF) +option(LLAMA_ALL_WARNINGS "llama: enable all compiler warnings" ON) +option(LLAMA_ALL_WARNINGS_3RD_PARTY "llama: enable all compiler warnings in 3rd party libs" OFF) +option(LLAMA_GPROF "llama: enable gprof" OFF) -# sanitizers -option(LLAMA_SANITIZE_THREAD "llama: enable thread sanitizer" OFF) -option(LLAMA_SANITIZE_ADDRESS "llama: enable address sanitizer" OFF) -option(LLAMA_SANITIZE_UNDEFINED "llama: enable undefined sanitizer" OFF) +# build +option(LLAMA_FATAL_WARNINGS "llama: enable -Werror flag" OFF) # instruction set specific #option(LLAMA_AVX "llama: enable AVX" ON) @@ -77,41 +33,58 @@ option(LLAMA_SANITIZE_UNDEFINED "llama: enable undefined sanitizer" # option(LLAMA_F16C "llama: enable F16C" ON) #endif() +if (WIN32) + set(LLAMA_WIN_VER "0x602" CACHE STRING "llama: Windows Version") +endif() + # 3rd party libs -option(LLAMA_ACCELERATE "llama: enable Accelerate framework" ON) -option(LLAMA_OPENBLAS "llama: use OpenBLAS" OFF) -#option(LLAMA_CUBLAS "llama: use cuBLAS" OFF) -#option(LLAMA_CLBLAST "llama: use CLBlast" OFF) -#option(LLAMA_METAL "llama: use Metal" OFF) -option(LLAMA_KOMPUTE "llama: use Kompute" ${LLAMA_KOMPUTE_DEFAULT}) +option(LLAMA_ACCELERATE "llama: enable Accelerate framework" ON) +option(LLAMA_BLAS "llama: use BLAS" OFF) +option(LLAMA_LLAMAFILE "llama: use llamafile SGEMM" 
${LLAMA_LLAMAFILE_DEFAULT}) set(LLAMA_BLAS_VENDOR "Generic" CACHE STRING "llama: BLAS library vendor") -set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels") -set(LLAMA_CUDA_DMMV_Y "1" CACHE STRING "llama: y block size for dmmv CUDA kernels") +#option(LLAMA_CUDA "llama: use CUDA" OFF) +option(LLAMA_CUDA_FORCE_DMMV "llama: use dmmv instead of mmvq CUDA kernels" OFF) +option(LLAMA_CUDA_FORCE_MMQ "llama: use mmq kernels instead of cuBLAS" OFF) +set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels") +set(LLAMA_CUDA_MMV_Y "1" CACHE STRING "llama: y block size for mmv CUDA kernels") +option(LLAMA_CUDA_F16 "llama: use 16 bit floats for some calculations" OFF) +set(LLAMA_CUDA_KQUANTS_ITER "2" CACHE STRING "llama: iters./thread per block for Q2_K/Q6_K") +set(LLAMA_CUDA_PEER_MAX_BATCH_SIZE "128" CACHE STRING + "llama: max. batch size for using peer access") +option(LLAMA_CUDA_NO_PEER_COPY "llama: do not use peer to peer copies" OFF) +#option(LLAMA_HIPBLAS "llama: use hipBLAS" OFF) +option(LLAMA_HIP_UMA "llama: use HIP unified memory architecture" OFF) +#option(LLAMA_CLBLAST "llama: use CLBlast" OFF) +#option(LLAMA_VULKAN "llama: use Vulkan" OFF) +option(LLAMA_VULKAN_CHECK_RESULTS "llama: run Vulkan op checks" OFF) +option(LLAMA_VULKAN_DEBUG "llama: enable Vulkan debug output" OFF) +option(LLAMA_VULKAN_VALIDATE "llama: enable Vulkan validation" OFF) +option(LLAMA_VULKAN_RUN_TESTS "llama: run Vulkan tests" OFF) +#option(LLAMA_METAL "llama: use Metal" ${LLAMA_METAL_DEFAULT}) +option(LLAMA_METAL_NDEBUG "llama: disable Metal debugging" OFF) +option(LLAMA_METAL_SHADER_DEBUG "llama: compile Metal with -fno-fast-math" OFF) +set(LLAMA_METAL_MACOSX_VERSION_MIN "" CACHE STRING + "llama: metal minimum macOS version") +set(LLAMA_METAL_STD "" CACHE STRING "llama: metal standard version (-std flag)") +#option(LLAMA_KOMPUTE "llama: use Kompute" OFF) +option(LLAMA_QKK_64 "llama: use super-block size of 64 for k-quants" OFF) +set(LLAMA_SCHED_MAX_COPIES "4" CACHE STRING "llama: max input copies for pipeline parallelism") + +# add perf arguments +option(LLAMA_PERF "llama: enable perf" OFF) # # Compile flags # -set(CMAKE_C_STANDARD 11) -set(CMAKE_C_STANDARD_REQUIRED true) set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) -if (NOT MSVC) - if (LLAMA_SANITIZE_THREAD) - add_compile_options(-fsanitize=thread) - link_libraries(-fsanitize=thread) - endif() +list(APPEND GGML_COMPILE_DEFS GGML_SCHED_MAX_COPIES=${LLAMA_SCHED_MAX_COPIES}) - if (LLAMA_SANITIZE_ADDRESS) - add_compile_options(-fsanitize=address -fno-omit-frame-pointer) - link_libraries(-fsanitize=address) - endif() - - if (LLAMA_SANITIZE_UNDEFINED) - add_compile_options(-fsanitize=undefined) - link_libraries(-fsanitize=undefined) - endif() +# enable libstdc++ assertions for debug builds +if (CMAKE_SYSTEM_NAME MATCHES "Linux") + list(APPEND GGML_COMPILE_DEFS $<$:_GLIBCXX_ASSERTIONS>) endif() if (APPLE AND LLAMA_ACCELERATE) @@ -119,247 +92,189 @@ if (APPLE AND LLAMA_ACCELERATE) if (ACCELERATE_FRAMEWORK) message(STATUS "Accelerate framework found") - add_compile_definitions(GGML_USE_ACCELERATE) + list(APPEND GGML_COMPILE_DEFS GGML_USE_ACCELERATE) + list(APPEND GGML_COMPILE_DEFS ACCELERATE_NEW_LAPACK) + list(APPEND GGML_COMPILE_DEFS ACCELERATE_LAPACK_ILP64) set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${ACCELERATE_FRAMEWORK}) else() message(WARNING "Accelerate framework not found") endif() endif() -if (LLAMA_OPENBLAS) +if (LLAMA_BLAS) if (LLAMA_STATIC) set(BLA_STATIC ON) endif() + if ($(CMAKE_VERSION) 
VERSION_GREATER_EQUAL 3.22) + set(BLA_SIZEOF_INTEGER 8) + endif() - set(BLA_VENDOR OpenBLAS) + set(BLA_VENDOR ${LLAMA_BLAS_VENDOR}) find_package(BLAS) + if (BLAS_FOUND) - message(STATUS "OpenBLAS found") + message(STATUS "BLAS found, Libraries: ${BLAS_LIBRARIES}") - add_compile_definitions(GGML_USE_OPENBLAS) - add_link_options(${BLAS_LIBRARIES}) - set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} openblas) + if ("${BLAS_INCLUDE_DIRS}" STREQUAL "") + # BLAS_INCLUDE_DIRS is missing in FindBLAS.cmake. + # see https://gitlab.kitware.com/cmake/cmake/-/issues/20268 + find_package(PkgConfig REQUIRED) + if (${LLAMA_BLAS_VENDOR} MATCHES "Generic") + pkg_check_modules(DepBLAS REQUIRED blas) + elseif (${LLAMA_BLAS_VENDOR} MATCHES "OpenBLAS") + # As of openblas v0.3.22, the 64-bit is named openblas64.pc + pkg_check_modules(DepBLAS openblas64) + if (NOT DepBLAS_FOUND) + pkg_check_modules(DepBLAS REQUIRED openblas) + endif() + elseif (${LLAMA_BLAS_VENDOR} MATCHES "FLAME") + pkg_check_modules(DepBLAS REQUIRED blis) + elseif (${LLAMA_BLAS_VENDOR} MATCHES "ATLAS") + pkg_check_modules(DepBLAS REQUIRED blas-atlas) + elseif (${LLAMA_BLAS_VENDOR} MATCHES "FlexiBLAS") + pkg_check_modules(DepBLAS REQUIRED flexiblas_api) + elseif (${LLAMA_BLAS_VENDOR} MATCHES "Intel") + # all Intel* libraries share the same include path + pkg_check_modules(DepBLAS REQUIRED mkl-sdl) + elseif (${LLAMA_BLAS_VENDOR} MATCHES "NVHPC") + # this doesn't provide pkg-config + # suggest to assign BLAS_INCLUDE_DIRS on your own + if ("${NVHPC_VERSION}" STREQUAL "") + message(WARNING "Better to set NVHPC_VERSION") + else() + set(DepBLAS_FOUND ON) + set(DepBLAS_INCLUDE_DIRS "/opt/nvidia/hpc_sdk/${CMAKE_SYSTEM_NAME}_${CMAKE_SYSTEM_PROCESSOR}/${NVHPC_VERSION}/math_libs/include") + endif() + endif() + if (DepBLAS_FOUND) + set(BLAS_INCLUDE_DIRS ${DepBLAS_INCLUDE_DIRS}) + else() + message(WARNING "BLAS_INCLUDE_DIRS neither been provided nor been automatically" + " detected by pkgconfig, trying to find cblas.h from possible paths...") + find_path(BLAS_INCLUDE_DIRS + NAMES cblas.h + HINTS + /usr/include + /usr/local/include + /usr/include/openblas + /opt/homebrew/opt/openblas/include + /usr/local/opt/openblas/include + /usr/include/x86_64-linux-gnu/openblas/include + ) + endif() + endif() - # find header file - set(OPENBLAS_INCLUDE_SEARCH_PATHS - /usr/include - /usr/include/openblas - /usr/include/openblas-base - /usr/local/include - /usr/local/include/openblas - /usr/local/include/openblas-base - /opt/OpenBLAS/include - $ENV{OpenBLAS_HOME} - $ENV{OpenBLAS_HOME}/include - ) - find_path(OPENBLAS_INC NAMES cblas.h PATHS ${OPENBLAS_INCLUDE_SEARCH_PATHS}) - add_compile_options(-I${OPENBLAS_INC}) + message(STATUS "BLAS found, Includes: ${BLAS_INCLUDE_DIRS}") + + list(APPEND GGML_COMPILE_OPTS ${BLAS_LINKER_FLAGS}) + + list(APPEND GGML_COMPILE_DEFS GGML_USE_OPENBLAS) + + if (${BLAS_INCLUDE_DIRS} MATCHES "mkl" AND (${LLAMA_BLAS_VENDOR} MATCHES "Generic" OR ${LLAMA_BLAS_VENDOR} MATCHES "Intel")) + list(APPEND GGML_COMPILE_DEFS GGML_BLAS_USE_MKL) + endif() + + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${BLAS_LIBRARIES}) + set(LLAMA_EXTRA_INCLUDES ${LLAMA_EXTRA_INCLUDES} ${BLAS_INCLUDE_DIRS}) else() - message(WARNING "OpenBLAS not found") + message(WARNING "BLAS not found, please refer to " + "https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors" + " to set correct LLAMA_BLAS_VENDOR") endif() endif() -if (LLAMA_KOMPUTE) - set(LLAMA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/llama.cpp-mainline) - if (NOT EXISTS "${LLAMA_DIR}/kompute/CMakeLists.txt") - 
message(FATAL_ERROR "Kompute not found") - endif() - message(STATUS "Kompute found") +if (LLAMA_LLAMAFILE) + list(APPEND GGML_COMPILE_DEFS GGML_USE_LLAMAFILE) - add_compile_definitions(VULKAN_HPP_DISPATCH_LOADER_DYNAMIC=1) - find_package(Vulkan COMPONENTS glslc REQUIRED) - find_program(glslc_executable NAMES glslc HINTS Vulkan::glslc) - if (NOT glslc_executable) - message(FATAL_ERROR "glslc not found") - endif() + set(GGML_HEADERS_LLAMAFILE ${DIRECTORY}/sgemm.h) + set(GGML_SOURCES_LLAMAFILE ${DIRECTORY}/sgemm.cpp) +endif() - function(compile_shader) - set(options) - set(oneValueArgs) - set(multiValueArgs SOURCES) - cmake_parse_arguments(compile_shader "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - foreach(source ${compile_shader_SOURCES}) - get_filename_component(OP_FILE ${source} NAME) - set(spv_file ${CMAKE_CURRENT_BINARY_DIR}/${OP_FILE}.spv) - add_custom_command( - OUTPUT ${spv_file} - DEPENDS ${LLAMA_DIR}/${source} - ${LLAMA_DIR}/kompute-shaders/common.comp - ${LLAMA_DIR}/kompute-shaders/op_getrows.comp - ${LLAMA_DIR}/kompute-shaders/op_mul_mv_q_n_pre.comp - ${LLAMA_DIR}/kompute-shaders/op_mul_mv_q_n.comp - COMMAND ${glslc_executable} --target-env=vulkan1.2 -o ${spv_file} ${LLAMA_DIR}/${source} - COMMENT "Compiling ${source} to ${source}.spv" +if (LLAMA_QKK_64) + list(APPEND GGML_COMPILE_DEFS GGML_QKK_64) +endif() + +if (LLAMA_PERF) + list(APPEND GGML_COMPILE_DEFS GGML_PERF) +endif() + +function(get_flags CCID CCVER) + set(C_FLAGS "") + set(CXX_FLAGS "") + + if (CCID MATCHES "Clang") + set(C_FLAGS -Wunreachable-code-break -Wunreachable-code-return) + set(CXX_FLAGS -Wunreachable-code-break -Wunreachable-code-return -Wmissing-prototypes -Wextra-semi) + + if ( + (CCID STREQUAL "Clang" AND CCVER VERSION_GREATER_EQUAL 3.8.0) OR + (CCID STREQUAL "AppleClang" AND CCVER VERSION_GREATER_EQUAL 7.3.0) ) - - get_filename_component(RAW_FILE_NAME ${spv_file} NAME) - set(FILE_NAME "shader${RAW_FILE_NAME}") - string(REPLACE ".comp.spv" ".h" HEADER_FILE ${FILE_NAME}) - string(TOUPPER ${HEADER_FILE} HEADER_FILE_DEFINE) - string(REPLACE "." 
"_" HEADER_FILE_DEFINE "${HEADER_FILE_DEFINE}") - set(OUTPUT_HEADER_FILE "${HEADER_FILE}") - message(STATUS "${HEADER_FILE} generating ${HEADER_FILE_DEFINE}") - if(CMAKE_GENERATOR MATCHES "Visual Studio") - add_custom_command( - OUTPUT ${OUTPUT_HEADER_FILE} - COMMAND ${CMAKE_COMMAND} -E echo "/*THIS FILE HAS BEEN AUTOMATICALLY GENERATED - DO NOT EDIT*/" > ${OUTPUT_HEADER_FILE} - COMMAND ${CMAKE_COMMAND} -E echo \"\#ifndef ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} - COMMAND ${CMAKE_COMMAND} -E echo \"\#define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} - COMMAND ${CMAKE_COMMAND} -E echo "namespace kp {" >> ${OUTPUT_HEADER_FILE} - COMMAND ${CMAKE_COMMAND} -E echo "namespace shader_data {" >> ${OUTPUT_HEADER_FILE} - COMMAND ${CMAKE_BINARY_DIR}/bin/$/xxd -i ${RAW_FILE_NAME} >> ${OUTPUT_HEADER_FILE} - COMMAND ${CMAKE_COMMAND} -E echo "}}" >> ${OUTPUT_HEADER_FILE} - COMMAND ${CMAKE_COMMAND} -E echo \"\#endif // define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} - DEPENDS ${spv_file} xxd - COMMENT "Converting to hpp: ${FILE_NAME} ${CMAKE_BINARY_DIR}/bin/$/xxd" - ) - else() - add_custom_command( - OUTPUT ${OUTPUT_HEADER_FILE} - COMMAND ${CMAKE_COMMAND} -E echo "/*THIS FILE HAS BEEN AUTOMATICALLY GENERATED - DO NOT EDIT*/" > ${OUTPUT_HEADER_FILE} - COMMAND ${CMAKE_COMMAND} -E echo \"\#ifndef ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} - COMMAND ${CMAKE_COMMAND} -E echo \"\#define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} - COMMAND ${CMAKE_COMMAND} -E echo "namespace kp {" >> ${OUTPUT_HEADER_FILE} - COMMAND ${CMAKE_COMMAND} -E echo "namespace shader_data {" >> ${OUTPUT_HEADER_FILE} - COMMAND ${CMAKE_BINARY_DIR}/bin/xxd -i ${RAW_FILE_NAME} >> ${OUTPUT_HEADER_FILE} - COMMAND ${CMAKE_COMMAND} -E echo "}}" >> ${OUTPUT_HEADER_FILE} - COMMAND ${CMAKE_COMMAND} -E echo \"\#endif // define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} - DEPENDS ${spv_file} xxd - COMMENT "Converting to hpp: ${FILE_NAME} ${CMAKE_BINARY_DIR}/bin/xxd" - ) + list(APPEND C_FLAGS -Wdouble-promotion) endif() - endforeach() - endfunction() + elseif (CCID STREQUAL "GNU") + set(C_FLAGS -Wdouble-promotion) + set(CXX_FLAGS -Wno-array-bounds) - set(KOMPUTE_OPT_LOG_LEVEL Critical CACHE STRING "Kompute log level") - add_subdirectory(${LLAMA_DIR}/kompute) + if (CCVER VERSION_GREATER_EQUAL 7.1.0) + list(APPEND CXX_FLAGS -Wno-format-truncation) + endif() + if (CCVER VERSION_GREATER_EQUAL 8.1.0) + list(APPEND CXX_FLAGS -Wextra-semi) + endif() + endif() - # Compile our shaders - compile_shader(SOURCES - kompute-shaders/op_scale.comp - kompute-shaders/op_scale_8.comp - kompute-shaders/op_add.comp - kompute-shaders/op_addrow.comp - kompute-shaders/op_mul.comp - kompute-shaders/op_silu.comp - kompute-shaders/op_relu.comp - kompute-shaders/op_gelu.comp - kompute-shaders/op_softmax.comp - kompute-shaders/op_norm.comp - kompute-shaders/op_rmsnorm.comp - kompute-shaders/op_diagmask.comp - kompute-shaders/op_mul_mat_mat_f32.comp - kompute-shaders/op_mul_mat_f16.comp - kompute-shaders/op_mul_mat_q8_0.comp - kompute-shaders/op_mul_mat_q4_0.comp - kompute-shaders/op_mul_mat_q4_1.comp - kompute-shaders/op_mul_mat_q6_k.comp - kompute-shaders/op_getrows_f16.comp - kompute-shaders/op_getrows_q4_0.comp - kompute-shaders/op_getrows_q4_1.comp - kompute-shaders/op_getrows_q6_k.comp - kompute-shaders/op_rope_f16.comp - kompute-shaders/op_rope_f32.comp - kompute-shaders/op_cpy_f16_f16.comp - kompute-shaders/op_cpy_f16_f32.comp - kompute-shaders/op_cpy_f32_f16.comp - kompute-shaders/op_cpy_f32_f32.comp - ) + set(GF_C_FLAGS ${C_FLAGS} 
PARENT_SCOPE) + set(GF_CXX_FLAGS ${CXX_FLAGS} PARENT_SCOPE) +endfunction() - # Create a custom target for our generated shaders - add_custom_target(generated_shaders DEPENDS - shaderop_scale.h - shaderop_scale_8.h - shaderop_add.h - shaderop_addrow.h - shaderop_mul.h - shaderop_silu.h - shaderop_relu.h - shaderop_gelu.h - shaderop_softmax.h - shaderop_norm.h - shaderop_rmsnorm.h - shaderop_diagmask.h - shaderop_mul_mat_mat_f32.h - shaderop_mul_mat_f16.h - shaderop_mul_mat_q8_0.h - shaderop_mul_mat_q4_0.h - shaderop_mul_mat_q4_1.h - shaderop_mul_mat_q6_k.h - shaderop_getrows_f16.h - shaderop_getrows_q4_0.h - shaderop_getrows_q4_1.h - shaderop_getrows_q6_k.h - shaderop_rope_f16.h - shaderop_rope_f32.h - shaderop_cpy_f16_f16.h - shaderop_cpy_f16_f32.h - shaderop_cpy_f32_f16.h - shaderop_cpy_f32_f32.h - ) - - # Create a custom command that depends on the generated_shaders - add_custom_command( - OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp - COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp - DEPENDS generated_shaders - COMMENT "Ensuring shaders are generated before compiling ggml-kompute.cpp" - ) - - # Add the stamp to the main sources to ensure dependency tracking - set(GGML_SOURCES_KOMPUTE ${LLAMA_DIR}/ggml-kompute.cpp ${LLAMA_DIR}/ggml-kompute.h ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp) - add_compile_definitions(GGML_USE_KOMPUTE) - set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} kompute) - set(LLAMA_EXTRA_INCLUDES ${LLAMA_EXTRA_INCLUDES} ${CMAKE_BINARY_DIR}) +if (LLAMA_FATAL_WARNINGS) + if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + list(APPEND C_FLAGS -Werror) + list(APPEND CXX_FLAGS -Werror) + elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + list(APPEND GGML_COMPILE_OPTS /WX) + endif() endif() if (LLAMA_ALL_WARNINGS) if (NOT MSVC) - set(c_flags - -Wall - -Wextra - -Wpedantic - -Wcast-qual - -Wdouble-promotion - -Wshadow - -Wstrict-prototypes - -Wpointer-arith - ) - set(cxx_flags - -Wall - -Wextra - -Wpedantic - -Wcast-qual - -Wno-unused-function - -Wno-multichar - ) + list(APPEND WARNING_FLAGS -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function) + list(APPEND C_FLAGS -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes + -Werror=implicit-int -Werror=implicit-function-declaration) + list(APPEND CXX_FLAGS -Wmissing-declarations -Wmissing-noreturn) + + list(APPEND C_FLAGS ${WARNING_FLAGS}) + list(APPEND CXX_FLAGS ${WARNING_FLAGS}) + + get_flags(${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}) + + list(APPEND GGML_COMPILE_OPTS "$<$:${C_FLAGS};${GF_C_FLAGS}>" + "$<$:${CXX_FLAGS};${GF_CXX_FLAGS}>") else() # todo : msvc + set(C_FLAGS "") + set(CXX_FLAGS "") endif() - - add_compile_options( - "$<$:${c_flags}>" - "$<$:${cxx_flags}>" - ) - endif() -if (MSVC) - add_compile_definitions(_CRT_SECURE_NO_WARNINGS) +if (WIN32) + list(APPEND GGML_COMPILE_DEFS _CRT_SECURE_NO_WARNINGS) if (BUILD_SHARED_LIBS) set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON) endif() endif() -if (LLAMA_LTO) - include(CheckIPOSupported) - check_ipo_supported(RESULT result OUTPUT output) - if (result) - set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE) - else() - message(WARNING "IPO is not supported: ${output}") - endif() +# this version of Apple ld64 is buggy +execute_process( + COMMAND ${CMAKE_C_COMPILER} ${CMAKE_EXE_LINKER_FLAGS} -Wl,-v + ERROR_VARIABLE output + OUTPUT_QUIET +) + +if (output MATCHES "dyld-1015\.7") + list(APPEND GGML_COMPILE_DEFS HAVE_BUGGY_APPLE_LINKER) endif() # Architecture specific @@ -367,109 +282,27 @@ endif() # 
feel free to update the Makefile for your architecture and send a pull request or issue message(STATUS "CMAKE_SYSTEM_PROCESSOR: ${CMAKE_SYSTEM_PROCESSOR}") if (MSVC) - string(TOLOWER "${CMAKE_GENERATOR_PLATFORM}" CMAKE_GENERATOR_PLATFORM_LWR) - message(STATUS "CMAKE_GENERATOR_PLATFORM: ${CMAKE_GENERATOR_PLATFORM}") + string(TOLOWER "${CMAKE_GENERATOR_PLATFORM}" CMAKE_GENERATOR_PLATFORM_LWR) + message(STATUS "CMAKE_GENERATOR_PLATFORM: ${CMAKE_GENERATOR_PLATFORM}") else () - set(CMAKE_GENERATOR_PLATFORM_LWR "") + set(CMAKE_GENERATOR_PLATFORM_LWR "") endif () if (NOT MSVC) if (LLAMA_STATIC) - add_link_options(-static) + list(APPEND GGML_LINK_OPTS -static) if (MINGW) - add_link_options(-static-libgcc -static-libstdc++) + list(APPEND GGML_LINK_OPTS -static-libgcc -static-libstdc++) endif() endif() if (LLAMA_GPROF) - add_compile_options(-pg) - endif() - if (LLAMA_NATIVE) - add_compile_options(-march=native) + list(APPEND GGML_COMPILE_OPTS -pg) endif() endif() -if ((${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm") OR (${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64") OR ("${CMAKE_GENERATOR_PLATFORM_LWR}" MATCHES "arm64")) - message(STATUS "ARM detected") - if (MSVC) - add_compile_definitions(__ARM_NEON) - add_compile_definitions(__ARM_FEATURE_FMA) - add_compile_definitions(__ARM_FEATURE_DOTPROD) - # add_compile_definitions(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) # MSVC doesn't support vdupq_n_f16, vld1q_f16, vst1q_f16 - add_compile_definitions(__aarch64__) # MSVC defines _M_ARM64 instead - else() - include(CheckCXXCompilerFlag) - check_cxx_compiler_flag(-mfp16-format=ieee COMPILER_SUPPORTS_FP16_FORMAT_I3E) - if (NOT "${COMPILER_SUPPORTS_FP16_FORMAT_I3E}" STREQUAL "") - add_compile_options(-mfp16-format=ieee) - endif() - if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv6") - # Raspberry Pi 1, Zero - add_compile_options(-mfpu=neon-fp-armv8 -mno-unaligned-access) - endif() - if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv7") - # Raspberry Pi 2 - add_compile_options(-mfpu=neon-fp-armv8 -mno-unaligned-access -funsafe-math-optimizations) - endif() - if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv8") - # Raspberry Pi 3, 4, Zero 2 (32-bit) - add_compile_options(-mno-unaligned-access) - endif() - endif() -elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$" OR "${CMAKE_GENERATOR_PLATFORM_LWR}" MATCHES "^(x86_64|i686|amd64|x64)$" ) - message(STATUS "x86 detected") - if (MSVC) - if (LLAMA_AVX512) - add_compile_options($<$:/arch:AVX512>) - add_compile_options($<$:/arch:AVX512>) - # MSVC has no compile-time flags enabling specific - # AVX512 extensions, neither it defines the - # macros corresponding to the extensions. - # Do it manually. 
- if (LLAMA_AVX512_VBMI) - add_compile_definitions($<$:__AVX512VBMI__>) - add_compile_definitions($<$:__AVX512VBMI__>) - endif() - if (LLAMA_AVX512_VNNI) - add_compile_definitions($<$:__AVX512VNNI__>) - add_compile_definitions($<$:__AVX512VNNI__>) - endif() - elseif (LLAMA_AVX2) - add_compile_options($<$:/arch:AVX2>) - add_compile_options($<$:/arch:AVX2>) - elseif (LLAMA_AVX) - add_compile_options($<$:/arch:AVX>) - add_compile_options($<$:/arch:AVX>) - endif() - else() - if (LLAMA_F16C) - add_compile_options(-mf16c) - endif() - if (LLAMA_FMA) - add_compile_options(-mfma) - endif() - if (LLAMA_AVX) - add_compile_options(-mavx) - endif() - if (LLAMA_AVX2) - add_compile_options(-mavx2) - endif() - if (LLAMA_AVX512) - add_compile_options(-mavx512f) - add_compile_options(-mavx512bw) - endif() - if (LLAMA_AVX512_VBMI) - add_compile_options(-mavx512vbmi) - endif() - if (LLAMA_AVX512_VNNI) - add_compile_options(-mavx512vnni) - endif() - endif() -elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64") - message(STATUS "PowerPC detected") - add_compile_options(-mcpu=native -mtune=native) - #TODO: Add targets for Power8/Power9 (Altivec/VSX) and Power10(MMA) and query for big endian systems (ppc64/le/be) -else() - message(STATUS "Unknown architecture") +if (MINGW) + # Target Windows 8 for PrefetchVirtualMemory + list(APPEND GGML_COMPILE_DEFS _WIN32_WINNT=${LLAMA_WIN_VER}) endif() # @@ -480,20 +313,20 @@ endif() # CLOCK_MONOTONIC came in POSIX.1-2001 / SUSv3 as optional # posix_memalign came in POSIX.1-2001 / SUSv3 # M_PI is an XSI extension since POSIX.1-2001 / SUSv3, came in XPG1 (1985) -add_compile_definitions(_XOPEN_SOURCE=600) +list(APPEND GGML_COMPILE_DEFS _XOPEN_SOURCE=600) # Somehow in OpenBSD whenever POSIX conformance is specified # some string functions rely on locale_t availability, # which was introduced in POSIX.1-2008, forcing us to go higher if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD") - remove_definitions(-D_XOPEN_SOURCE=600) - add_compile_definitions(_XOPEN_SOURCE=700) + list(REMOVE_ITEM GGML_COMPILE_DEFS _XOPEN_SOURCE=600) + list(APPEND GGML_COMPILE_DEFS _XOPEN_SOURCE=700) endif() # Data types, macros and functions related to controlling CPU affinity and # some memory allocation are available on Linux through GNU extensions in libc if (CMAKE_SYSTEM_NAME MATCHES "Linux") - add_compile_definitions(_GNU_SOURCE) + list(APPEND GGML_COMPILE_DEFS _GNU_SOURCE) endif() # RLIMIT_MEMLOCK came in BSD, is not specified in POSIX.1, @@ -505,95 +338,593 @@ if ( CMAKE_SYSTEM_NAME MATCHES "tvOS" OR CMAKE_SYSTEM_NAME MATCHES "DragonFly" ) - add_compile_definitions(_DARWIN_C_SOURCE) + list(APPEND GGML_COMPILE_DEFS _DARWIN_C_SOURCE) endif() # alloca is a non-standard interface that is not visible on BSDs when # POSIX conformance is specified, but not all of them provide a clean way # to enable it in such cases if (CMAKE_SYSTEM_NAME MATCHES "FreeBSD") - add_compile_definitions(__BSD_VISIBLE) + list(APPEND GGML_COMPILE_DEFS __BSD_VISIBLE) endif() if (CMAKE_SYSTEM_NAME MATCHES "NetBSD") - add_compile_definitions(_NETBSD_SOURCE) + list(APPEND GGML_COMPILE_DEFS _NETBSD_SOURCE) endif() if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD") - add_compile_definitions(_BSD_SOURCE) + list(APPEND GGML_COMPILE_DEFS _BSD_SOURCE) endif() -function(include_ggml DIRECTORY SUFFIX WITH_LLAMA) +function(include_ggml SUFFIX) message(STATUS "Configuring ggml implementation target llama${SUFFIX} in ${CMAKE_CURRENT_SOURCE_DIR}/${DIRECTORY}") # - # Build libraries + # libraries # - set(GGML_CUBLAS_USE NO) - if (LLAMA_CUBLAS) + if (LLAMA_CUDA) 
cmake_minimum_required(VERSION 3.17) + get_property(LANGS GLOBAL PROPERTY ENABLED_LANGUAGES) + if (NOT CUDA IN_LIST LANGS) + message(FATAL_ERROR "The CUDA language must be enabled.") + endif() - find_package(CUDAToolkit) - if (CUDAToolkit_FOUND) - set(GGML_CUBLAS_USE YES) - message(STATUS "cuBLAS found") + find_package(CUDAToolkit REQUIRED) + set(CUDAToolkit_BIN_DIR ${CUDAToolkit_BIN_DIR} PARENT_SCOPE) - enable_language(CUDA) + set(GGML_HEADERS_CUDA ${DIRECTORY}/ggml-cuda.h) - set(GGML_SOURCES_CUDA ${DIRECTORY}/ggml-cuda.cu ${DIRECTORY}/ggml-cuda.h) + file(GLOB GGML_SOURCES_CUDA "${DIRECTORY}/ggml-cuda/*.cu") + list(APPEND GGML_SOURCES_CUDA "${DIRECTORY}/ggml-cuda.cu") - if (LLAMA_STATIC) + list(APPEND GGML_COMPILE_DEFS_PUBLIC GGML_USE_CUDA) + if (LLAMA_CUDA_FORCE_DMMV) + list(APPEND GGML_COMPILE_DEFS GGML_CUDA_FORCE_DMMV) + endif() + if (LLAMA_CUDA_FORCE_MMQ) + list(APPEND GGML_COMPILE_DEFS GGML_CUDA_FORCE_MMQ) + endif() + list(APPEND GGML_COMPILE_DEFS GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X}) + list(APPEND GGML_COMPILE_DEFS GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y}) + if (LLAMA_CUDA_F16) + list(APPEND GGML_COMPILE_DEFS GGML_CUDA_F16) + endif() + list(APPEND GGML_COMPILE_DEFS K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER}) + list(APPEND GGML_COMPILE_DEFS GGML_CUDA_PEER_MAX_BATCH_SIZE=${LLAMA_CUDA_PEER_MAX_BATCH_SIZE}) + if (LLAMA_CUDA_NO_PEER_COPY) + list(APPEND GGML_COMPILE_DEFS GGML_CUDA_NO_PEER_COPY) + endif() + + if (LLAMA_STATIC) + if (WIN32) + # As of 12.3.1 CUDA Toolkit for Windows does not offer a static cublas library + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas CUDA::cublasLt) + else () set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static) + endif() + else() + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt) + endif() + + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cuda_driver) + + if (DEFINED CMAKE_CUDA_ARCHITECTURES) + set(GGML_CUDA_ARCHITECTURES "${CMAKE_CUDA_ARCHITECTURES}") + else() + # 52 == lowest CUDA 12 standard + # 60 == f16 CUDA intrinsics + # 61 == integer CUDA intrinsics + # 70 == compute capability at which unrolling a loop in mul_mat_q kernels is faster + if (LLAMA_CUDA_F16 OR LLAMA_CUDA_DMMV_F16) + set(GGML_CUDA_ARCHITECTURES "60;61;70") # needed for f16 CUDA intrinsics else() - set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt) + set(GGML_CUDA_ARCHITECTURES "52;61;70") # lowest CUDA 12 standard + lowest for integer intrinsics + #set(GGML_CUDA_ARCHITECTURES "") # use this to compile much faster, but only F16 models work endif() - - else() - message(WARNING "cuBLAS not found") endif() + message(STATUS "Using CUDA architectures: ${GGML_CUDA_ARCHITECTURES}") endif() - set(GGML_CLBLAST_USE NO) if (LLAMA_CLBLAST) - find_package(CLBlast) - if (CLBlast_FOUND) - set(GGML_CLBLAST_USE YES) - message(STATUS "CLBlast found") + find_package(CLBlast REQUIRED) - set(GGML_OPENCL_SOURCE_FILE ggml-opencl.cpp) - if (NOT EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${DIRECTORY}/${GGML_OPENCL_SOURCE_FILE}) - set(GGML_OPENCL_SOURCE_FILE ggml-opencl.c) + set(GGML_HEADERS_OPENCL ${DIRECTORY}/ggml-opencl.h) + set(GGML_SOURCES_OPENCL ${DIRECTORY}/ggml-opencl.cpp) + + list(APPEND GGML_COMPILE_DEFS_PUBLIC GGML_USE_CLBLAST) + + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} clblast) + endif() + + if (LLAMA_VULKAN) + find_package(Vulkan REQUIRED) + + set(GGML_HEADERS_VULKAN ${DIRECTORY}/ggml-vulkan.h) + set(GGML_SOURCES_VULKAN ${DIRECTORY}/ggml-vulkan.cpp) + + 
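This LLAMA_VULKAN branch is only reached when the new LLMODEL_VULKAN option is switched on, and it hard-requires a Vulkan SDK via find_package(Vulkan REQUIRED); the CI jobs above install vulkan-sdk for that reason. A hedged configure sketch, assuming the SDK is already installed (build directory name is illustrative):

    # build the experimental Vulkan variants alongside the default ones
    cmake -S gpt4all-backend -B build -DLLMODEL_VULKAN=ON
    cmake --build build --parallel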
list(APPEND GGML_COMPILE_DEFS_PUBLIC GGML_USE_VULKAN) + + if (LLAMA_VULKAN_CHECK_RESULTS) + list(APPEND GGML_COMPILE_DEFS GGML_VULKAN_CHECK_RESULTS) + endif() + + if (LLAMA_VULKAN_DEBUG) + list(APPEND GGML_COMPILE_DEFS GGML_VULKAN_DEBUG) + endif() + + if (LLAMA_VULKAN_VALIDATE) + list(APPEND GGML_COMPILE_DEFS GGML_VULKAN_VALIDATE) + endif() + + if (LLAMA_VULKAN_RUN_TESTS) + list(APPEND GGML_COMPILE_DEFS GGML_VULKAN_RUN_TESTS) + endif() + + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} Vulkan::Vulkan) + endif() + + if (LLAMA_HIPBLAS) + if ($ENV{ROCM_PATH}) + set(ROCM_PATH $ENV{ROCM_PATH}) + else() + set(ROCM_PATH /opt/rocm) + endif() + list(APPEND CMAKE_PREFIX_PATH ${ROCM_PATH}) + + string(REGEX MATCH "hipcc(\.bat)?$" CXX_IS_HIPCC "${CMAKE_CXX_COMPILER}") + + if (CXX_IS_HIPCC AND UNIX) + message(WARNING "Setting hipcc as the C++ compiler is legacy behavior." + " Prefer setting the HIP compiler directly. See README for details.") + else() + # Forward AMDGPU_TARGETS to CMAKE_HIP_ARCHITECTURES. + if (AMDGPU_TARGETS AND NOT CMAKE_HIP_ARCHITECTURES) + set(CMAKE_HIP_ARCHITECTURES ${AMDGPU_ARGETS}) + endif() + cmake_minimum_required(VERSION 3.21) + get_property(LANGS GLOBAL PROPERTY ENABLED_LANGUAGES) + if (NOT HIP IN_LIST LANGS) + message(FATAL_ERROR "The HIP language must be enabled.") + endif() + endif() + find_package(hip REQUIRED) + find_package(hipblas REQUIRED) + find_package(rocblas REQUIRED) + + message(STATUS "HIP and hipBLAS found") + + set(GGML_HEADERS_ROCM ${DIRECTORY}/ggml-cuda.h) + + file(GLOB GGML_SOURCES_ROCM "${DIRECTORY}/ggml-rocm/*.cu") + list(APPEND GGML_SOURCES_ROCM "${DIRECTORY}/ggml-rocm.cu") + + list(APPEND GGML_COMPILE_DEFS_PUBLIC GGML_USE_HIPBLAS GGML_USE_CUDA) + + if (LLAMA_HIP_UMA) + list(APPEND GGML_COMPILE_DEFS GGML_HIP_UMA) + endif() + + if (LLAMA_CUDA_FORCE_DMMV) + list(APPEND GGML_COMPILE_DEFS GGML_CUDA_FORCE_DMMV) + endif() + + if (LLAMA_CUDA_FORCE_MMQ) + list(APPEND GGML_COMPILE_DEFS GGML_CUDA_FORCE_MMQ) + endif() + + if (LLAMA_CUDA_NO_PEER_COPY) + list(APPEND GGML_COMPILE_DEFS GGML_CUDA_NO_PEER_COPY) + endif() + + list(APPEND GGML_COMPILE_DEFS GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X}) + list(APPEND GGML_COMPILE_DEFS GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y}) + list(APPEND GGML_COMPILE_DEFS K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER}) + + if (CXX_IS_HIPCC) + set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE CXX) + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} hip::device) + else() + set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE HIP) + endif() + + if (LLAMA_STATIC) + message(FATAL_ERROR "Static linking not supported for HIP/ROCm") + endif() + + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} PUBLIC hip::host roc::rocblas roc::hipblas) + endif() + + set(LLAMA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/${DIRECTORY}) + + if (LLAMA_KOMPUTE AND NOT GGML_KOMPUTE_ONCE) + set(GGML_KOMPUTE_ONCE ON PARENT_SCOPE) + if (NOT EXISTS "${LLAMA_DIR}/kompute/CMakeLists.txt") + message(FATAL_ERROR "Kompute not found") + endif() + message(STATUS "Kompute found") + + find_package(Vulkan COMPONENTS glslc) + if (NOT Vulkan_FOUND) + message(FATAL_ERROR "Vulkan not found. To build without Vulkan, use -DLLMODEL_KOMPUTE=OFF.") + endif() + find_program(glslc_executable NAMES glslc HINTS Vulkan::glslc) + if (NOT glslc_executable) + message(FATAL_ERROR "glslc not found. 
To build without Vulkan, use -DLLMODEL_KOMPUTE=OFF.") + endif() + + function(compile_shader) + set(options) + set(oneValueArgs) + set(multiValueArgs SOURCES) + cmake_parse_arguments(compile_shader "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + foreach(source ${compile_shader_SOURCES}) + get_filename_component(OP_FILE ${source} NAME) + set(spv_file ${CMAKE_CURRENT_BINARY_DIR}/${OP_FILE}.spv) + add_custom_command( + OUTPUT ${spv_file} + DEPENDS ${LLAMA_DIR}/${source} + ${LLAMA_DIR}/kompute-shaders/common.comp + ${LLAMA_DIR}/kompute-shaders/op_getrows.comp + ${LLAMA_DIR}/kompute-shaders/op_mul_mv_q_n_pre.comp + ${LLAMA_DIR}/kompute-shaders/op_mul_mv_q_n.comp + COMMAND ${glslc_executable} --target-env=vulkan1.2 -o ${spv_file} ${LLAMA_DIR}/${source} + COMMENT "Compiling ${source} to ${source}.spv" + ) + + get_filename_component(RAW_FILE_NAME ${spv_file} NAME) + set(FILE_NAME "shader${RAW_FILE_NAME}") + string(REPLACE ".comp.spv" ".h" HEADER_FILE ${FILE_NAME}) + string(TOUPPER ${HEADER_FILE} HEADER_FILE_DEFINE) + string(REPLACE "." "_" HEADER_FILE_DEFINE "${HEADER_FILE_DEFINE}") + set(OUTPUT_HEADER_FILE "${HEADER_FILE}") + message(STATUS "${HEADER_FILE} generating ${HEADER_FILE_DEFINE}") + if(CMAKE_GENERATOR MATCHES "Visual Studio") + add_custom_command( + OUTPUT ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo "/*THIS FILE HAS BEEN AUTOMATICALLY GENERATED - DO NOT EDIT*/" > ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo \"\#ifndef ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo \"\#define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo "namespace kp {" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo "namespace shader_data {" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_BINARY_DIR}/bin/$/xxd -i ${RAW_FILE_NAME} >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo "}}" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo \"\#endif // define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} + DEPENDS ${spv_file} xxd + COMMENT "Converting to hpp: ${FILE_NAME} ${CMAKE_BINARY_DIR}/bin/$/xxd" + ) + else() + add_custom_command( + OUTPUT ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo "/*THIS FILE HAS BEEN AUTOMATICALLY GENERATED - DO NOT EDIT*/" > ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo \"\#ifndef ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo \"\#define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo "namespace kp {" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo "namespace shader_data {" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_BINARY_DIR}/bin/xxd -i ${RAW_FILE_NAME} >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo "}}" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo \"\#endif // define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} + DEPENDS ${spv_file} xxd + COMMENT "Converting to hpp: ${FILE_NAME} ${CMAKE_BINARY_DIR}/bin/xxd" + ) + endif() + endforeach() + endfunction() + + set(KOMPUTE_OPT_BUILT_IN_VULKAN_HEADER_TAG "v1.3.239" CACHE STRING "Kompute Vulkan headers tag") + set(KOMPUTE_OPT_LOG_LEVEL Critical CACHE STRING "Kompute log level") + set(FMT_INSTALL OFF) + add_subdirectory(${LLAMA_DIR}/kompute) + + # Compile our shaders + compile_shader(SOURCES + kompute-shaders/op_scale.comp + kompute-shaders/op_scale_8.comp + kompute-shaders/op_add.comp + kompute-shaders/op_addrow.comp + kompute-shaders/op_mul.comp + 
kompute-shaders/op_silu.comp + kompute-shaders/op_relu.comp + kompute-shaders/op_gelu.comp + kompute-shaders/op_softmax.comp + kompute-shaders/op_norm.comp + kompute-shaders/op_rmsnorm.comp + kompute-shaders/op_diagmask.comp + kompute-shaders/op_mul_mat_mat_f32.comp + kompute-shaders/op_mul_mat_f16.comp + kompute-shaders/op_mul_mat_q8_0.comp + kompute-shaders/op_mul_mat_q4_0.comp + kompute-shaders/op_mul_mat_q4_1.comp + kompute-shaders/op_mul_mat_q6_k.comp + kompute-shaders/op_getrows_f32.comp + kompute-shaders/op_getrows_f16.comp + kompute-shaders/op_getrows_q4_0.comp + kompute-shaders/op_getrows_q4_1.comp + kompute-shaders/op_getrows_q6_k.comp + kompute-shaders/op_rope_f16.comp + kompute-shaders/op_rope_f32.comp + kompute-shaders/op_cpy_f16_f16.comp + kompute-shaders/op_cpy_f16_f32.comp + kompute-shaders/op_cpy_f32_f16.comp + kompute-shaders/op_cpy_f32_f32.comp + ) + + # Create a custom target for our generated shaders + add_custom_target(generated_shaders DEPENDS + shaderop_scale.h + shaderop_scale_8.h + shaderop_add.h + shaderop_addrow.h + shaderop_mul.h + shaderop_silu.h + shaderop_relu.h + shaderop_gelu.h + shaderop_softmax.h + shaderop_norm.h + shaderop_rmsnorm.h + shaderop_diagmask.h + shaderop_mul_mat_mat_f32.h + shaderop_mul_mat_f16.h + shaderop_mul_mat_q8_0.h + shaderop_mul_mat_q4_0.h + shaderop_mul_mat_q4_1.h + shaderop_mul_mat_q6_k.h + shaderop_getrows_f32.h + shaderop_getrows_f16.h + shaderop_getrows_q4_0.h + shaderop_getrows_q4_1.h + shaderop_getrows_q6_k.h + shaderop_rope_f16.h + shaderop_rope_f32.h + shaderop_cpy_f16_f16.h + shaderop_cpy_f16_f32.h + shaderop_cpy_f32_f16.h + shaderop_cpy_f32_f32.h + ) + + # Create a custom command that depends on the generated_shaders + add_custom_command( + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp + COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp + DEPENDS generated_shaders + COMMENT "Ensuring shaders are generated before compiling ggml-kompute.cpp" + ) + endif() + + if (LLAMA_KOMPUTE) + list(APPEND GGML_COMPILE_DEFS VULKAN_HPP_DISPATCH_LOADER_DYNAMIC=1) + + # Add the stamp to the main sources to ensure dependency tracking + set(GGML_SOURCES_KOMPUTE ${LLAMA_DIR}/ggml-kompute.cpp ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp) + set(GGML_HEADERS_KOMPUTE ${LLAMA_DIR}/ggml-kompute.h) + + list(APPEND GGML_COMPILE_DEFS_PUBLIC GGML_USE_KOMPUTE) + + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} kompute) + endif() + + set(CUDA_CXX_FLAGS "") + + if (LLAMA_CUDA) + set(CUDA_FLAGS -use_fast_math) + + if (LLAMA_FATAL_WARNINGS) + list(APPEND CUDA_FLAGS -Werror all-warnings) + endif() + + if (LLAMA_ALL_WARNINGS AND NOT MSVC) + set(NVCC_CMD ${CMAKE_CUDA_COMPILER} .c) + if (NOT CMAKE_CUDA_HOST_COMPILER STREQUAL "") + list(APPEND NVCC_CMD -ccbin ${CMAKE_CUDA_HOST_COMPILER}) endif() - set(GGML_OPENCL_SOURCES ${DIRECTORY}/${GGML_OPENCL_SOURCE_FILE} ${DIRECTORY}/ggml-opencl.h) + execute_process( + COMMAND ${NVCC_CMD} -Xcompiler --version + OUTPUT_VARIABLE CUDA_CCFULLVER + ERROR_QUIET + ) - set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} clblast) - else() - message(WARNING "CLBlast not found") + if (NOT CUDA_CCFULLVER MATCHES clang) + set(CUDA_CCID "GNU") + execute_process( + COMMAND ${NVCC_CMD} -Xcompiler "-dumpfullversion -dumpversion" + OUTPUT_VARIABLE CUDA_CCVER + OUTPUT_STRIP_TRAILING_WHITESPACE + ERROR_QUIET + ) + else() + if (CUDA_CCFULLVER MATCHES Apple) + set(CUDA_CCID "AppleClang") + else() + set(CUDA_CCID "Clang") + endif() + string(REGEX REPLACE "^.* version ([0-9.]*).*$" "\\1" CUDA_CCVER ${CUDA_CCFULLVER}) + 
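The host-compiler warning flags collected in this block are later forwarded to nvcc with -Xcompiler, on top of the -use_fast_math flag set above. Roughly, the resulting compile line has the following shape (the file name and the exact warning set are illustrative, not taken from this patch):

    # approximate shape of the nvcc invocation these flags produce
    nvcc -use_fast_math -Xcompiler -Wall,-Wextra,-Wno-pedantic -c ggml-cuda.cu -o ggml-cuda.o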
endif() + + message("-- CUDA host compiler is ${CUDA_CCID} ${CUDA_CCVER}") + + get_flags(${CUDA_CCID} ${CUDA_CCVER}) + list(APPEND CUDA_CXX_FLAGS ${CXX_FLAGS} ${GF_CXX_FLAGS}) # This is passed to -Xcompiler later + endif() + + if (NOT MSVC) + list(APPEND CUDA_CXX_FLAGS -Wno-pedantic) endif() endif() - set(GGML_METAL_SOURCES) if (LLAMA_METAL) - find_library(FOUNDATION_LIBRARY Foundation REQUIRED) - find_library(METAL_FRAMEWORK Metal REQUIRED) - find_library(METALKIT_FRAMEWORK MetalKit REQUIRED) - find_library(METALPERFORMANCE_FRAMEWORK MetalPerformanceShaders REQUIRED) + find_library(FOUNDATION_LIBRARY Foundation REQUIRED) + find_library(METAL_FRAMEWORK Metal REQUIRED) + find_library(METALKIT_FRAMEWORK MetalKit REQUIRED) - set(GGML_METAL_SOURCES ${DIRECTORY}/ggml-metal.m ${DIRECTORY}/ggml-metal.h) - # get full path to the file - #add_compile_definitions(GGML_METAL_DIR_KERNELS="${CMAKE_CURRENT_SOURCE_DIR}/") + message(STATUS "Metal framework found") + set(GGML_HEADERS_METAL ${DIRECTORY}/ggml-metal.h) + set(GGML_SOURCES_METAL ${DIRECTORY}/ggml-metal.m) - # copy ggml-metal.metal to bin directory - configure_file(${DIRECTORY}/ggml-metal.metal bin/ggml-metal.metal COPYONLY) + list(APPEND GGML_COMPILE_DEFS_PUBLIC GGML_USE_METAL) + if (LLAMA_METAL_NDEBUG) + list(APPEND GGML_COMPILE_DEFS GGML_METAL_NDEBUG) + endif() + + # copy ggml-common.h and ggml-metal.metal to bin directory + configure_file(${DIRECTORY}/ggml-common.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h COPYONLY) + configure_file(${DIRECTORY}/ggml-metal.metal ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal COPYONLY) + + if (LLAMA_METAL_SHADER_DEBUG) + # custom command to do the following: + # xcrun -sdk macosx metal -fno-fast-math -c ggml-metal.metal -o ggml-metal.air + # xcrun -sdk macosx metallib ggml-metal.air -o default.metallib + # + # note: this is the only way I found to disable fast-math in Metal. 
it's ugly, but at least it works + # disabling fast math is needed in order to pass tests/test-backend-ops + # note: adding -fno-inline fixes the tests when using MTL_SHADER_VALIDATION=1 + # note: unfortunately, we have to call it default.metallib instead of ggml.metallib + # ref: https://github.com/ggerganov/whisper.cpp/issues/1720 + set(XC_FLAGS -fno-fast-math -fno-inline -g) + else() + set(XC_FLAGS -O3) + endif() + + # Append macOS metal versioning flags + if (LLAMA_METAL_MACOSX_VERSION_MIN) + message(STATUS "Adding -mmacosx-version-min=${LLAMA_METAL_MACOSX_VERSION_MIN} flag to metal compilation") + list(APPEND XC_FLAGS -mmacosx-version-min=${LLAMA_METAL_MACOSX_VERSION_MIN}) + endif() + if (LLAMA_METAL_STD) + message(STATUS "Adding -std=${LLAMA_METAL_STD} flag to metal compilation") + list(APPEND XC_FLAGS -std=${LLAMA_METAL_STD}) + endif() + + add_custom_command( + OUTPUT ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib + COMMAND xcrun -sdk macosx metal ${XC_FLAGS} -c ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air + COMMAND xcrun -sdk macosx metallib ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib + COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air + COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h + COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal + DEPENDS ${DIRECTORY}/ggml-metal.metal ${DIRECTORY}/ggml-common.h + COMMENT "Compiling Metal kernels" + ) + + add_custom_target( + ggml-metal ALL + DEPENDS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib + ) set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${FOUNDATION_LIBRARY} ${METAL_FRAMEWORK} ${METALKIT_FRAMEWORK} - ${METALPERFORMANCE_FRAMEWORK} - ) + ) endif() + set(ARCH_FLAGS "") + + if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR CMAKE_GENERATOR_PLATFORM_LWR STREQUAL "arm64" OR + (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND + CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm.*|ARM64)$")) + message(STATUS "ARM detected") + if (MSVC) + # TODO: arm msvc? + else() + check_cxx_compiler_flag(-mfp16-format=ieee COMPILER_SUPPORTS_FP16_FORMAT_I3E) + if (NOT "${COMPILER_SUPPORTS_FP16_FORMAT_I3E}" STREQUAL "") + list(APPEND ARCH_FLAGS -mfp16-format=ieee) + endif() + if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv6") + # Raspberry Pi 1, Zero + list(APPEND ARCH_FLAGS -mfpu=neon-fp-armv8 -mno-unaligned-access) + endif() + if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv7") + if ("${CMAKE_SYSTEM_NAME}" STREQUAL "Android") + # Android armeabi-v7a + list(APPEND ARCH_FLAGS -mfpu=neon-vfpv4 -mno-unaligned-access -funsafe-math-optimizations) + else() + # Raspberry Pi 2 + list(APPEND ARCH_FLAGS -mfpu=neon-fp-armv8 -mno-unaligned-access -funsafe-math-optimizations) + endif() + endif() + if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv8") + # Android arm64-v8a + # Raspberry Pi 3, 4, Zero 2 (32-bit) + list(APPEND ARCH_FLAGS -mno-unaligned-access) + endif() + endif() + elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR + (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND + CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64)$")) + message(STATUS "x86 detected") + if (MSVC) + if (LLAMA_AVX512) + list(APPEND ARCH_FLAGS /arch:AVX512) + # MSVC has no compile-time flags enabling specific + # AVX512 extensions, neither it defines the + # macros corresponding to the extensions. + # Do it manually. 
+ if (LLAMA_AVX512_VBMI) + list(APPEND GGML_COMPILE_DEFS $<$:__AVX512VBMI__>) + list(APPEND GGML_COMPILE_DEFS $<$:__AVX512VBMI__>) + endif() + if (LLAMA_AVX512_VNNI) + list(APPEND GGML_COMPILE_DEFS $<$:__AVX512VNNI__>) + list(APPEND GGML_COMPILE_DEFS $<$:__AVX512VNNI__>) + endif() + elseif (LLAMA_AVX2) + list(APPEND ARCH_FLAGS /arch:AVX2) + elseif (LLAMA_AVX) + list(APPEND ARCH_FLAGS /arch:AVX) + endif() + else() + if (LLAMA_NATIVE) + list(APPEND ARCH_FLAGS -march=native) + endif() + if (LLAMA_F16C) + list(APPEND ARCH_FLAGS -mf16c) + endif() + if (LLAMA_FMA) + list(APPEND ARCH_FLAGS -mfma) + endif() + if (LLAMA_AVX) + list(APPEND ARCH_FLAGS -mavx) + endif() + if (LLAMA_AVX2) + list(APPEND ARCH_FLAGS -mavx2) + endif() + if (LLAMA_AVX512) + list(APPEND ARCH_FLAGS -mavx512f) + list(APPEND ARCH_FLAGS -mavx512bw) + endif() + if (LLAMA_AVX512_VBMI) + list(APPEND ARCH_FLAGS -mavx512vbmi) + endif() + if (LLAMA_AVX512_VNNI) + list(APPEND ARCH_FLAGS -mavx512vnni) + endif() + endif() + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64") + message(STATUS "PowerPC detected") + if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64le") + list(APPEND ARCH_FLAGS -mcpu=powerpc64le) + else() + list(APPEND ARCH_FLAGS -mcpu=native -mtune=native) + #TODO: Add targets for Power8/Power9 (Altivec/VSX) and Power10(MMA) and query for big endian systems (ppc64/le/be) + endif() + else() + message(STATUS "Unknown architecture") + endif() + + list(APPEND GGML_COMPILE_OPTS "$<$:${ARCH_FLAGS}>") + list(APPEND GGML_COMPILE_OPTS "$<$:${ARCH_FLAGS}>") + + if (LLAMA_CUDA) + list(APPEND CUDA_CXX_FLAGS ${ARCH_FLAGS}) + list(JOIN CUDA_CXX_FLAGS " " CUDA_CXX_FLAGS_JOINED) # pass host compiler flags as a single argument + if (NOT CUDA_CXX_FLAGS_JOINED STREQUAL "") + list(APPEND CUDA_FLAGS -Xcompiler ${CUDA_CXX_FLAGS_JOINED}) + endif() + list(APPEND GGML_COMPILE_OPTS "$<$:${CUDA_FLAGS}>") + endif() + + # ggml + add_library(ggml${SUFFIX} OBJECT ${DIRECTORY}/ggml.c ${DIRECTORY}/ggml.h @@ -601,145 +932,70 @@ function(include_ggml DIRECTORY SUFFIX WITH_LLAMA) ${DIRECTORY}/ggml-alloc.h ${DIRECTORY}/ggml-backend.c ${DIRECTORY}/ggml-backend.h - ${DIRECTORY}/ggml-quants.h ${DIRECTORY}/ggml-quants.c - ${GGML_SOURCES_CUDA} - ${GGML_METAL_SOURCES} - ${GGML_OPENCL_SOURCES} - ${GGML_SOURCES_KOMPUTE}) + ${DIRECTORY}/ggml-quants.h + ${GGML_SOURCES_CUDA} ${GGML_HEADERS_CUDA} + ${GGML_SOURCES_OPENCL} ${GGML_HEADERS_OPENCL} + ${GGML_SOURCES_METAL} ${GGML_HEADERS_METAL} + ${GGML_SOURCES_KOMPUTE} ${GGML_HEADERS_KOMPUTE} + ${GGML_SOURCES_VULKAN} ${GGML_HEADERS_VULKAN} + ${GGML_SOURCES_ROCM} ${GGML_HEADERS_ROCM} + ${GGML_SOURCES_LLAMAFILE} ${GGML_HEADERS_LLAMAFILE} + ) - if (LLAMA_METAL AND GGML_METAL_SOURCES) - target_compile_definitions(ggml${SUFFIX} PUBLIC GGML_USE_METAL GGML_METAL_NDEBUG) - endif() - target_include_directories(ggml${SUFFIX} PUBLIC ${DIRECTORY}) + target_include_directories(ggml${SUFFIX} PUBLIC ${DIRECTORY} ${LLAMA_EXTRA_INCLUDES}) target_compile_features(ggml${SUFFIX} PUBLIC c_std_11) # don't bump + target_link_libraries(ggml${SUFFIX} PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS}) + if (BUILD_SHARED_LIBS) set_target_properties(ggml${SUFFIX} PROPERTIES POSITION_INDEPENDENT_CODE ON) endif() - if (WITH_LLAMA) - # Backwards compatibility with old llama.cpp versions -# set(LLAMA_UTIL_SOURCE_FILE llama-util.h) - if (NOT EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${DIRECTORY}/${LLAMA_UTIL_SOURCE_FILE}) - set(LLAMA_UTIL_SOURCE_FILE llama_util.h) - endif() + # llama - add_library(llama${SUFFIX} STATIC - ${DIRECTORY}/llama.cpp - ${DIRECTORY}/llama.h) + 
add_library(llama${SUFFIX} STATIC + ${DIRECTORY}/llama.cpp + ${DIRECTORY}/llama.h + ${DIRECTORY}/unicode.h + ${DIRECTORY}/unicode.cpp + ${DIRECTORY}/unicode-data.cpp + ) - if (LLAMA_METAL AND GGML_METAL_SOURCES) - target_compile_definitions(llama${SUFFIX} PUBLIC GGML_USE_METAL GGML_METAL_NDEBUG) - endif() - target_include_directories(llama${SUFFIX} PUBLIC ${DIRECTORY}) - target_compile_features(llama${SUFFIX} PUBLIC cxx_std_11) # don't bump + target_include_directories(llama${SUFFIX} PUBLIC ${DIRECTORY}) + target_compile_features (llama${SUFFIX} PUBLIC cxx_std_11) # don't bump - if (BUILD_SHARED_LIBS) - set_target_properties(llama${SUFFIX} PROPERTIES POSITION_INDEPENDENT_CODE ON) - target_compile_definitions(llama${SUFFIX} PRIVATE LLAMA_SHARED LLAMA_BUILD) - endif() + target_link_libraries(llama${SUFFIX} PRIVATE + ggml${SUFFIX} + ${LLAMA_EXTRA_LIBS} + ) + + if (BUILD_SHARED_LIBS) + set_target_properties(llama${SUFFIX} PROPERTIES POSITION_INDEPENDENT_CODE ON) + target_compile_definitions(llama${SUFFIX} PRIVATE LLAMA_SHARED LLAMA_BUILD) endif() - if (GGML_SOURCES_CUDA) - message(STATUS "GGML CUDA sources found, configuring CUDA architecture") - set_property(TARGET ggml${SUFFIX} PROPERTY CUDA_ARCHITECTURES OFF) - set_property(TARGET ggml${SUFFIX} PROPERTY CUDA_SELECT_NVCC_ARCH_FLAGS "Auto") - if (WITH_LLAMA) - set_property(TARGET llama${SUFFIX} PROPERTY CUDA_ARCHITECTURES OFF) - endif() + # target options + + set_target_properties(ggml${SUFFIX} llama${SUFFIX} PROPERTIES + CXX_STANDARD 11 + CXX_STANDARD_REQUIRED true + C_STANDARD 11 + C_STANDARD_REQUIRED true + ) + if (GGML_CUDA_ARCHITECTURES) + set_property(TARGET ggml${SUFFIX} llama${SUFFIX} PROPERTY CUDA_ARCHITECTURES "${GGML_CUDA_ARCHITECTURES}") endif() - if (GGML_CUBLAS_USE) - target_compile_definitions(ggml${SUFFIX} PRIVATE - GGML_USE_CUBLAS - GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X} - GGML_CUDA_DMMV_Y=${LLAMA_CUDA_DMMV_Y}) - if (WITH_LLAMA) - target_compile_definitions(llama${SUFFIX} PRIVATE - GGML_USE_CUBLAS - GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X} - GGML_CUDA_DMMV_Y=${LLAMA_CUDA_DMMV_Y}) - endif() - endif() - if (GGML_CLBLAST_USE) - if (WITH_LLAMA) - target_compile_definitions(llama${SUFFIX} PRIVATE GGML_USE_CLBLAST) - endif() - target_compile_definitions(ggml${SUFFIX} PRIVATE GGML_USE_CLBLAST) - endif() + target_compile_options(ggml${SUFFIX} PRIVATE "${GGML_COMPILE_OPTS}") + target_compile_options(llama${SUFFIX} PRIVATE "${GGML_COMPILE_OPTS}") - if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64") - message(STATUS "ARM detected") - if (MSVC) - # TODO: arm msvc? - else() - if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64") - target_compile_options(ggml${SUFFIX} PRIVATE -mcpu=native) - endif() - # TODO: armv6,7,8 version specific flags - endif() - elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$") - message(STATUS "x86 detected") - if (MSVC) - if (LLAMA_AVX512) - target_compile_options(ggml${SUFFIX} PRIVATE - $<$:/arch:AVX512> - $<$:/arch:AVX512>) - # MSVC has no compile-time flags enabling specific - # AVX512 extensions, neither it defines the - # macros corresponding to the extensions. - # Do it manually. 
- if (LLAMA_AVX512_VBMI) - target_compile_definitions(ggml${SUFFIX} PRIVATE - $<$:__AVX512VBMI__> - $<$:__AVX512VBMI__>) - endif() - if (LLAMA_AVX512_VNNI) - target_compile_definitions(ggml${SUFFIX} PRIVATE - $<$:__AVX512VNNI__> - $<$:__AVX512VNNI__>) - endif() - elseif (LLAMA_AVX2) - target_compile_options(ggml${SUFFIX} PRIVATE - $<$:/arch:AVX2> - $<$:/arch:AVX2>) - elseif (LLAMA_AVX) - target_compile_options(ggml${SUFFIX} PRIVATE - $<$:/arch:AVX> - $<$:/arch:AVX>) - endif() - else() - if (LLAMA_F16C) - target_compile_options(ggml${SUFFIX} PRIVATE -mf16c) - endif() - if (LLAMA_FMA) - target_compile_options(ggml${SUFFIX} PRIVATE -mfma) - endif() - if (LLAMA_AVX) - target_compile_options(ggml${SUFFIX} PRIVATE -mavx) - endif() - if (LLAMA_AVX2) - target_compile_options(ggml${SUFFIX} PRIVATE -mavx2) - endif() - if (LLAMA_AVX512) - target_compile_options(ggml${SUFFIX} PRIVATE -mavx512f) - target_compile_options(ggml${SUFFIX} PRIVATE -mavx512bw) - endif() - if (LLAMA_AVX512_VBMI) - target_compile_options(ggml${SUFFIX} PRIVATE -mavx512vbmi) - endif() - if (LLAMA_AVX512_VNNI) - target_compile_options(ggml${SUFFIX} PRIVATE -mavx512vnni) - endif() - endif() - else() - # TODO: support PowerPC - message(STATUS "Unknown architecture") - endif() + target_compile_definitions(ggml${SUFFIX} PRIVATE "${GGML_COMPILE_DEFS}") + target_compile_definitions(llama${SUFFIX} PRIVATE "${GGML_COMPILE_DEFS}") - target_link_libraries(ggml${SUFFIX} PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS}) - if (WITH_LLAMA) - target_link_libraries(llama${SUFFIX} PRIVATE ggml${SUFFIX} ${LLAMA_EXTRA_LIBS}) - endif() + target_compile_definitions(ggml${SUFFIX} PUBLIC "${GGML_COMPILE_DEFS_PUBLIC}") + target_compile_definitions(llama${SUFFIX} PUBLIC "${GGML_COMPILE_DEFS_PUBLIC}") + + target_link_options(ggml${SUFFIX} PRIVATE "${GGML_LINK_OPTS}") + target_link_options(llama${SUFFIX} PRIVATE "${GGML_LINK_OPTS}") endfunction() diff --git a/gpt4all-backend/llamamodel.cpp b/gpt4all-backend/llamamodel.cpp index 250766bb..e88ad9fe 100644 --- a/gpt4all-backend/llamamodel.cpp +++ b/gpt4all-backend/llamamodel.cpp @@ -22,7 +22,11 @@ #include #include #ifdef GGML_USE_KOMPUTE -#include +# include +#elif GGML_USE_VULKAN +# include +#elif GGML_USE_CUDA +# include #endif using namespace std::string_literals; @@ -32,13 +36,44 @@ static constexpr int GGUF_VER_MAX = 3; static const char * const modelType_ = "LLaMA"; +// note: same order as LLM_ARCH_NAMES in llama.cpp static const std::vector KNOWN_ARCHES { - "baichuan", "bert", "bloom", "codeshell", "falcon", "gemma", "gpt2", "llama", "mpt", "nomic-bert", "orion", - "persimmon", "phi2", "plamo", "qwen", "qwen2", "refact", "stablelm", "starcoder" + "llama", + "falcon", + // "grok", -- 314B parameters + "gpt2", + // "gptj", -- no inference code + // "gptneox", -- no inference code + "mpt", + "baichuan", + "starcoder", + // "persimmon", -- CUDA generates garbage + "refact", + "bert", + "nomic-bert", + "bloom", + "stablelm", + "qwen", + "qwen2", + "qwen2moe", + "phi2", + "phi3", + // "plamo", -- https://github.com/ggerganov/llama.cpp/issues/5669 + "codeshell", + "orion", + "internlm2", + // "minicpm", -- CUDA generates garbage + "gemma", + "starcoder2", + // "mamba", -- CUDA missing SSM_CONV + "xverse", + "command-r", + // "dbrx", -- 16x12B parameters + "olmo", }; static const std::vector EMBEDDING_ARCHES { - "bert", "nomic-bert" + "bert", "nomic-bert", }; static bool is_embedding_arch(const std::string &arch) { @@ -170,6 +205,7 @@ struct LLamaPrivate { const std::string modelPath; bool modelLoaded = false; int 
device = -1; + std::string deviceName; llama_model *model = nullptr; llama_context *ctx = nullptr; llama_model_params model_params; @@ -313,10 +349,11 @@ bool LLamaModel::loadModel(const std::string &modelPath, int n_ctx, int ngl) d_ptr->backend_name = "cpu"; // default -#ifdef GGML_USE_KOMPUTE +#if defined(GGML_USE_KOMPUTE) || defined(GGML_USE_VULKAN) || defined(GGML_USE_CUDA) if (d_ptr->device != -1) { d_ptr->model_params.main_gpu = d_ptr->device; d_ptr->model_params.n_gpu_layers = ngl; + d_ptr->model_params.split_mode = LLAMA_SPLIT_MODE_NONE; } #elif defined(GGML_USE_METAL) (void)ngl; @@ -337,6 +374,7 @@ bool LLamaModel::loadModel(const std::string &modelPath, int n_ctx, int ngl) if (!d_ptr->model) { fflush(stdout); d_ptr->device = -1; + d_ptr->deviceName.clear(); std::cerr << "LLAMA ERROR: failed to load model from " << modelPath << std::endl; return false; } @@ -379,19 +417,24 @@ bool LLamaModel::loadModel(const std::string &modelPath, int n_ctx, int ngl) llama_free_model(d_ptr->model); d_ptr->model = nullptr; d_ptr->device = -1; + d_ptr->deviceName.clear(); return false; } d_ptr->end_tokens = {llama_token_eos(d_ptr->model)}; -#ifdef GGML_USE_KOMPUTE if (usingGPUDevice()) { +#ifdef GGML_USE_KOMPUTE if (llama_verbose()) { - std::cerr << "llama.cpp: using Vulkan on " << ggml_vk_current_device().name << std::endl; + std::cerr << "llama.cpp: using Vulkan on " << d_ptr->deviceName << std::endl; } d_ptr->backend_name = "kompute"; - } +#elif defined(GGML_USE_VULKAN) + d_ptr->backend_name = "vulkan"; +#elif defined(GGML_USE_CUDA) + d_ptr->backend_name = "cuda"; #endif + } m_supportsEmbedding = isEmbedding; m_supportsCompletion = !isEmbedding; @@ -452,7 +495,18 @@ std::vector LLamaModel::tokenize(PromptContext &ctx, const std:: std::string LLamaModel::tokenToString(Token id) const { - return llama_token_to_piece(d_ptr->ctx, id); + std::vector result(8, 0); + const int n_tokens = llama_token_to_piece(d_ptr->model, id, result.data(), result.size(), false); + if (n_tokens < 0) { + result.resize(-n_tokens); + int check = llama_token_to_piece(d_ptr->model, id, result.data(), result.size(), false); + GGML_ASSERT(check == -n_tokens); + } + else { + result.resize(n_tokens); + } + + return std::string(result.data(), result.size()); } LLModel::Token LLamaModel::sampleToken(PromptContext &promptCtx) const @@ -517,34 +571,77 @@ int32_t LLamaModel::layerCount(std::string const &modelPath) const return get_arch_key_u32(modelPath, "block_count"); } +#ifdef GGML_USE_VULKAN +static const char *getVulkanVendorName(uint32_t vendorID) { + switch (vendorID) { + case 0x10DE: return "nvidia"; + case 0x1002: return "amd"; + case 0x8086: return "intel"; + default: return "unknown"; + } +} +#endif + std::vector LLamaModel::availableGPUDevices(size_t memoryRequired) const { -#ifdef GGML_USE_KOMPUTE +#if defined(GGML_USE_KOMPUTE) || defined(GGML_USE_VULKAN) || defined(GGML_USE_CUDA) size_t count = 0; - auto * vkDevices = ggml_vk_available_devices(memoryRequired, &count); - if (vkDevices) { +#ifdef GGML_USE_KOMPUTE + auto *lcppDevices = ggml_vk_available_devices(memoryRequired, &count); +#elif defined(GGML_USE_VULKAN) + (void)memoryRequired; // hasn't been used since GGUF was added + auto *lcppDevices = ggml_vk_available_devices(&count); +#else // defined(GGML_USE_CUDA) + (void)memoryRequired; + auto *lcppDevices = ggml_cuda_available_devices(&count); +#endif + + if (lcppDevices) { std::vector devices; devices.reserve(count); for (size_t i = 0; i < count; ++i) { - auto & dev = vkDevices[i]; + auto & dev = lcppDevices[i]; 
+ devices.emplace_back( +#ifdef GGML_USE_KOMPUTE + /* backend = */ "kompute", /* index = */ dev.index, /* type = */ dev.type, /* heapSize = */ dev.heapSize, /* name = */ dev.name, /* vendor = */ dev.vendor +#elif defined(GGML_USE_VULKAN) + /* backend = */ "vulkan", + /* index = */ dev.index, + /* type = */ dev.type, + /* heapSize = */ dev.heapSize, + /* name = */ dev.name, + /* vendor = */ getVulkanVendorName(dev.vendorID) +#else // defined(GGML_USE_CUDA) + /* backend = */ "cuda", + /* index = */ dev.index, + /* type = */ 2, // vk::PhysicalDeviceType::eDiscreteGpu + /* heapSize = */ dev.heapSize, + /* name = */ dev.name, + /* vendor = */ "nvidia" +#endif ); + +#ifndef GGML_USE_CUDA ggml_vk_device_destroy(&dev); +#else + ggml_cuda_device_destroy(&dev); +#endif } - free(vkDevices); + free(lcppDevices); return devices; } #else (void)memoryRequired; - std::cerr << __func__ << ": built without Kompute\n"; + std::cerr << __func__ << ": built without a GPU backend\n"; #endif return {}; @@ -552,11 +649,32 @@ std::vector LLamaModel::availableGPUDevices(size_t memoryReq bool LLamaModel::initializeGPUDevice(size_t memoryRequired, const std::string &name) const { -#if defined(GGML_USE_KOMPUTE) +#if defined(GGML_USE_VULKAN) || defined(GGML_USE_CUDA) + auto devices = availableGPUDevices(memoryRequired); + + auto dev_it = devices.begin(); +#ifndef GGML_USE_CUDA + if (name == "amd" || name == "nvidia" || name == "intel") { + dev_it = std::find_if(dev_it, devices.end(), [&name](auto &dev) { return dev.vendor == name; }); + } else +#endif + if (name != "gpu") { + dev_it = std::find_if(dev_it, devices.end(), [&name](auto &dev) { return dev.name == name; }); + } + + if (dev_it < devices.end()) { + d_ptr->device = dev_it->index; + d_ptr->deviceName = dev_it->name; + return true; + } + return false; +#elif defined(GGML_USE_KOMPUTE) ggml_vk_device device; bool ok = ggml_vk_get_device(&device, memoryRequired, name.c_str()); if (ok) { d_ptr->device = device.index; + d_ptr->deviceName = device.name; + ggml_vk_device_destroy(&device); return true; } #else @@ -568,14 +686,17 @@ bool LLamaModel::initializeGPUDevice(size_t memoryRequired, const std::string &n bool LLamaModel::initializeGPUDevice(int device, std::string *unavail_reason) const { -#if defined(GGML_USE_KOMPUTE) +#if defined(GGML_USE_KOMPUTE) || defined(GGML_USE_VULKAN) || defined(GGML_USE_CUDA) (void)unavail_reason; + auto devices = availableGPUDevices(); + auto it = std::find_if(devices.begin(), devices.end(), [device](auto &dev) { return dev.index == device; }); d_ptr->device = device; + d_ptr->deviceName = it < devices.end() ? 
it->name : "(unknown)"; return true; #else (void)device; if (unavail_reason) { - *unavail_reason = "built without Kompute"; + *unavail_reason = "built without a GPU backend"; } return false; #endif @@ -583,7 +704,7 @@ bool LLamaModel::initializeGPUDevice(int device, std::string *unavail_reason) co bool LLamaModel::hasGPUDevice() const { -#if defined(GGML_USE_KOMPUTE) +#if defined(GGML_USE_KOMPUTE) || defined(GGML_USE_VULKAN) || defined(GGML_USE_CUDA) return d_ptr->device != -1; #else return false; @@ -592,15 +713,20 @@ bool LLamaModel::hasGPUDevice() const bool LLamaModel::usingGPUDevice() const { -#if defined(GGML_USE_KOMPUTE) - bool hasDevice = hasGPUDevice() && d_ptr->model_params.n_gpu_layers > 0; + bool hasDevice; + +#ifdef GGML_USE_KOMPUTE + hasDevice = hasGPUDevice() && d_ptr->model_params.n_gpu_layers > 0; assert(!hasDevice || ggml_vk_has_device()); - return hasDevice; +#elif defined(GGML_USE_VULKAN) || defined(GGML_USE_CUDA) + hasDevice = hasGPUDevice() && d_ptr->model_params.n_gpu_layers > 0; #elif defined(GGML_USE_METAL) - return true; + hasDevice = true; #else - return false; + hasDevice = false; #endif + + return hasDevice; } const char *LLamaModel::backendName() const { @@ -608,11 +734,11 @@ const char *LLamaModel::backendName() const { } const char *LLamaModel::gpuDeviceName() const { -#if defined(GGML_USE_KOMPUTE) if (usingGPUDevice()) { - return ggml_vk_current_device().name; - } +#if defined(GGML_USE_KOMPUTE) || defined(GGML_USE_VULKAN) || defined(GGML_USE_CUDA) + return d_ptr->deviceName.c_str(); #endif + } return nullptr; } diff --git a/gpt4all-backend/llamamodel_impl.h b/gpt4all-backend/llamamodel_impl.h index d36dc6d2..2051fd3b 100644 --- a/gpt4all-backend/llamamodel_impl.h +++ b/gpt4all-backend/llamamodel_impl.h @@ -30,7 +30,7 @@ public: size_t restoreState(const uint8_t *src) override; void setThreadCount(int32_t n_threads) override; int32_t threadCount() const override; - std::vector availableGPUDevices(size_t memoryRequired) const override; + std::vector availableGPUDevices(size_t memoryRequired = 0) const override; bool initializeGPUDevice(size_t memoryRequired, const std::string &name) const override; bool initializeGPUDevice(int device, std::string *unavail_reason = nullptr) const override; bool hasGPUDevice() const override; diff --git a/gpt4all-backend/llmodel.cpp b/gpt4all-backend/llmodel.cpp index e54effc3..3f359c9e 100644 --- a/gpt4all-backend/llmodel.cpp +++ b/gpt4all-backend/llmodel.cpp @@ -12,12 +12,21 @@ #include #include #include +#include #include #ifdef _MSC_VER #include #endif +#ifndef __APPLE__ +static const std::string DEFAULT_BACKENDS[] = {"kompute", "cpu"}; +#elif defined(__aarch64__) +static const std::string DEFAULT_BACKENDS[] = {"metal", "cpu"}; +#else +static const std::string DEFAULT_BACKENDS[] = {"cpu"}; +#endif + std::string s_implementations_search_path = "."; #if !(defined(__x86_64__) || defined(_M_X64)) @@ -86,11 +95,9 @@ const std::vector &LLModel::Implementation::implementat static auto* libs = new std::vector([] () { std::vector fres; - std::string impl_name_re = "(gptj|llamamodel-mainline)"; + std::string impl_name_re = "(gptj|llamamodel-mainline)-(cpu|metal|kompute|vulkan|cuda)"; if (cpu_supports_avx2() == 0) { impl_name_re += "-avxonly"; - } else { - impl_name_re += "-(default|metal)"; } std::regex re(impl_name_re); auto search_in_directory = [&](const std::string& paths) { @@ -125,6 +132,13 @@ const std::vector &LLModel::Implementation::implementat return *libs; } +static std::string applyCPUVariant(const std::string 
&buildVariant) { + if (buildVariant != "metal" && cpu_supports_avx2() == 0) { + return buildVariant + "-avxonly"; + } + return buildVariant; +} + const LLModel::Implementation* LLModel::Implementation::implementation(const char *fname, const std::string& buildVariant) { bool buildVariantMatched = false; std::optional archName; @@ -142,110 +156,124 @@ const LLModel::Implementation* LLModel::Implementation::implementation(const cha } if (!buildVariantMatched) - throw MissingImplementationError("Could not find any implementations for build variant: " + buildVariant); + return nullptr; if (!archName) throw UnsupportedModelError("Unsupported file format"); throw BadArchError(std::move(*archName)); } -LLModel *LLModel::Implementation::construct(const std::string &modelPath, std::string buildVariant, int n_ctx) { - // Get correct implementation - const Implementation* impl = nullptr; - - #if defined(__APPLE__) && defined(__arm64__) // FIXME: See if metal works for intel macs - if (buildVariant == "auto") { - size_t total_mem = getSystemTotalRAMInBytes(); - try { - impl = implementation(modelPath.c_str(), "metal"); - } catch (const std::exception &e) { - // fall back to CPU - } - if(impl) { - LLModel* metalimpl = impl->m_construct(); - metalimpl->m_implementation = impl; - /* TODO(cebtenzzre): after we fix requiredMem, we should change this to happen at - * load time, not construct time. right now n_ctx is incorrectly hardcoded 2048 in - * most (all?) places where this is called, causing underestimation of required - * memory. */ - size_t req_mem = metalimpl->requiredMem(modelPath, n_ctx, 100); - float req_to_total = (float) req_mem / (float) total_mem; - // on a 16GB M2 Mac a 13B q4_0 (0.52) works for me but a 13B q4_K_M (0.55) does not - if (req_to_total >= 0.53) { - delete metalimpl; - impl = nullptr; - } else { - return metalimpl; - } - } - } - #else - (void)n_ctx; - #endif - - if (!impl) { - //TODO: Auto-detect CUDA/OpenCL - if (buildVariant == "auto") { - if (cpu_supports_avx2() == 0) { - buildVariant = "avxonly"; - } else { - buildVariant = "default"; - } - } - impl = implementation(modelPath.c_str(), buildVariant); +LLModel *LLModel::Implementation::construct(const std::string &modelPath, const std::string &backend, int n_ctx) { + std::vector desiredBackends; + if (backend != "auto") { + desiredBackends.push_back(backend); + } else { + desiredBackends.insert(desiredBackends.end(), DEFAULT_BACKENDS, std::end(DEFAULT_BACKENDS)); } - // Construct and return llmodel implementation - auto fres = impl->m_construct(); - fres->m_implementation = impl; - return fres; + for (const auto &desiredBackend: desiredBackends) { + const auto *impl = implementation(modelPath.c_str(), applyCPUVariant(desiredBackend)); + + if (impl) { + // Construct llmodel implementation + auto *fres = impl->m_construct(); + fres->m_implementation = impl; + +#if defined(__APPLE__) && defined(__aarch64__) // FIXME: See if metal works for intel macs + /* TODO(cebtenzzre): after we fix requiredMem, we should change this to happen at + * load time, not construct time. right now n_ctx is incorrectly hardcoded 2048 in + * most (all?) places where this is called, causing underestimation of required + * memory. 
*/ + if (backend == "auto" && desiredBackend == "metal") { + // on a 16GB M2 Mac a 13B q4_0 (0.52) works for me but a 13B q4_K_M (0.55) does not + size_t req_mem = fres->requiredMem(modelPath, n_ctx, 100); + if (req_mem >= size_t(0.53f * getSystemTotalRAMInBytes())) { + delete fres; + continue; + } + } +#else + (void)n_ctx; +#endif + + return fres; + } + } + + throw MissingImplementationError("Could not find any implementations for backend: " + backend); } -LLModel *LLModel::Implementation::constructDefaultLlama() { - static std::unique_ptr llama([]() -> LLModel * { - const std::vector *impls; - try { - impls = &implementationList(); - } catch (const std::runtime_error &e) { - std::cerr << __func__ << ": implementationList failed: " << e.what() << "\n"; - return nullptr; - } +LLModel *LLModel::Implementation::constructGlobalLlama(const std::optional &backend) { + static std::unordered_map> implCache; + + const std::vector *impls; + try { + impls = &implementationList(); + } catch (const std::runtime_error &e) { + std::cerr << __func__ << ": implementationList failed: " << e.what() << "\n"; + return nullptr; + } + + std::vector desiredBackends; + if (backend) { + desiredBackends.push_back(backend.value()); + } else { + desiredBackends.insert(desiredBackends.end(), DEFAULT_BACKENDS, std::end(DEFAULT_BACKENDS)); + } + + const Implementation *impl = nullptr; + + for (const auto &desiredBackend: desiredBackends) { + auto cacheIt = implCache.find(desiredBackend); + if (cacheIt != implCache.end()) + return cacheIt->second.get(); // cached - const LLModel::Implementation *impl = nullptr; for (const auto &i: *impls) { - if (i.m_buildVariant == "metal" || i.m_modelType != "LLaMA") continue; - impl = &i; - } - if (!impl) { - std::cerr << __func__ << ": could not find llama.cpp implementation\n"; - return nullptr; + if (i.m_modelType == "LLaMA" && i.m_buildVariant == applyCPUVariant(desiredBackend)) { + impl = &i; + break; + } } - auto fres = impl->m_construct(); - fres->m_implementation = impl; - return fres; - }()); - return llama.get(); + if (impl) { + auto *fres = impl->m_construct(); + fres->m_implementation = impl; + implCache[desiredBackend] = std::unique_ptr(fres); + return fres; + } + } + + std::cerr << __func__ << ": could not find Llama implementation for backend: " << backend.value_or("default") << "\n"; + return nullptr; } std::vector LLModel::Implementation::availableGPUDevices(size_t memoryRequired) { - auto *llama = constructDefaultLlama(); - if (llama) { return llama->availableGPUDevices(memoryRequired); } - return {}; + std::vector devices; +#ifndef __APPLE__ + static const std::string backends[] = {"kompute", "cuda"}; + for (const auto &backend: backends) { + auto *llama = constructGlobalLlama(backend); + if (llama) { + auto backendDevs = llama->availableGPUDevices(memoryRequired); + devices.insert(devices.end(), backendDevs.begin(), backendDevs.end()); + } + } +#endif + return devices; } int32_t LLModel::Implementation::maxContextLength(const std::string &modelPath) { - auto *llama = constructDefaultLlama(); + auto *llama = constructGlobalLlama(); return llama ? llama->maxContextLength(modelPath) : -1; } int32_t LLModel::Implementation::layerCount(const std::string &modelPath) { - auto *llama = constructDefaultLlama(); + auto *llama = constructGlobalLlama(); return llama ? 
llama->layerCount(modelPath) : -1; } bool LLModel::Implementation::isEmbeddingModel(const std::string &modelPath) { - auto *llama = constructDefaultLlama(); + auto *llama = constructGlobalLlama(); return llama && llama->isEmbeddingModel(modelPath); } diff --git a/gpt4all-backend/llmodel.h b/gpt4all-backend/llmodel.h index 1aca1e44..51bf4a23 100644 --- a/gpt4all-backend/llmodel.h +++ b/gpt4all-backend/llmodel.h @@ -1,6 +1,7 @@ #ifndef LLMODEL_H #define LLMODEL_H +#include #include #include #include @@ -8,8 +9,11 @@ #include #include #include +#include #include +using namespace std::string_literals; + #define LLMODEL_MAX_PROMPT_BATCH 128 class Dlhandle; @@ -41,14 +45,35 @@ public: }; struct GPUDevice { + const char *backend; int index; int type; size_t heapSize; std::string name; std::string vendor; - GPUDevice(int index, int type, size_t heapSize, std::string name, std::string vendor): - index(index), type(type), heapSize(heapSize), name(std::move(name)), vendor(std::move(vendor)) {} + GPUDevice(const char *backend, int index, int type, size_t heapSize, std::string name, std::string vendor): + backend(backend), index(index), type(type), heapSize(heapSize), name(std::move(name)), + vendor(std::move(vendor)) {} + + std::string selectionName() const { return m_backendNames.at(backend) + ": " + name; } + std::string reportedName() const { return name + " (" + m_backendNames.at(backend) + ")"; } + + static std::string updateSelectionName(const std::string &name) { + if (name == "Auto" || name == "CPU" || name == "Metal") + return name; + auto it = std::find_if(m_backendNames.begin(), m_backendNames.end(), [&name](const auto &entry) { + return name.starts_with(entry.second + ": "); + }); + if (it != m_backendNames.end()) + return name; + return "Vulkan: " + name; // previously, there were only Vulkan devices + } + + private: + static inline const std::unordered_map m_backendNames { + {"cuda", "CUDA"}, {"kompute", "Vulkan"}, + }; }; class Implementation { @@ -60,7 +85,7 @@ public: std::string_view modelType() const { return m_modelType; } std::string_view buildVariant() const { return m_buildVariant; } - static LLModel *construct(const std::string &modelPath, std::string buildVariant = "auto", int n_ctx = 2048); + static LLModel *construct(const std::string &modelPath, const std::string &backend = "auto", int n_ctx = 2048); static std::vector availableGPUDevices(size_t memoryRequired = 0); static int32_t maxContextLength(const std::string &modelPath); static int32_t layerCount(const std::string &modelPath); @@ -76,7 +101,7 @@ public: static const std::vector &implementationList(); static const Implementation *implementation(const char *fname, const std::string &buildVariant); - static LLModel *constructDefaultLlama(); + static LLModel *constructGlobalLlama(const std::optional &backend = std::nullopt); char *(*m_getFileArch)(const char *fname); bool (*m_isArchSupported)(const char *arch); diff --git a/gpt4all-backend/llmodel_c.cpp b/gpt4all-backend/llmodel_c.cpp index 52dae104..139bd9eb 100644 --- a/gpt4all-backend/llmodel_c.cpp +++ b/gpt4all-backend/llmodel_c.cpp @@ -31,10 +31,10 @@ static void llmodel_set_error(const char **errptr, const char *message) { } } -llmodel_model llmodel_model_create2(const char *model_path, const char *build_variant, const char **error) { +llmodel_model llmodel_model_create2(const char *model_path, const char *backend, const char **error) { LLModel *llModel; try { - llModel = LLModel::Implementation::construct(model_path, build_variant); + llModel = 
LLModel::Implementation::construct(model_path, backend); } catch (const std::exception& e) { llmodel_set_error(error, e.what()); return nullptr; @@ -248,6 +248,7 @@ struct llmodel_gpu_device *llmodel_available_gpu_devices(size_t memoryRequired, for (unsigned i = 0; i < devices.size(); i++) { const auto &dev = devices[i]; auto &cdev = c_devices[i]; + cdev.backend = dev.backend; cdev.index = dev.index; cdev.type = dev.type; cdev.heapSize = dev.heapSize; diff --git a/gpt4all-backend/llmodel_c.h b/gpt4all-backend/llmodel_c.h index 35e08be1..5e7bea7f 100644 --- a/gpt4all-backend/llmodel_c.h +++ b/gpt4all-backend/llmodel_c.h @@ -48,6 +48,7 @@ struct llmodel_prompt_context { }; struct llmodel_gpu_device { + const char * backend; int index; int type; // same as VkPhysicalDeviceType size_t heapSize; @@ -86,7 +87,7 @@ typedef bool (*llmodel_recalculate_callback)(bool is_recalculating); * Embedding cancellation callback for use with llmodel_embed. * @param batch_sizes The number of tokens in each batch that will be embedded. * @param n_batch The number of batches that will be embedded. - * @param backend The backend that will be used for embedding. One of "cpu", "kompute", or "metal". + * @param backend The backend that will be used for embedding. One of "cpu", "kompute", "cuda", or "metal". * @return True to cancel llmodel_embed, false to continue. */ typedef bool (*llmodel_emb_cancel_callback)(unsigned *batch_sizes, unsigned n_batch, const char *backend); @@ -103,11 +104,11 @@ DEPRECATED llmodel_model llmodel_model_create(const char *model_path); * Create a llmodel instance. * Recognises correct model type from file at model_path * @param model_path A string representing the path to the model file; will only be used to detect model type. - * @param build_variant A string representing the implementation to use (auto, default, avxonly, ...), + * @param backend A string representing the implementation to use. One of 'auto', 'cpu', 'metal', 'kompute', or 'cuda'. * @param error A pointer to a string; will only be set on error. * @return A pointer to the llmodel_model instance; NULL on error. */ -llmodel_model llmodel_model_create2(const char *model_path, const char *build_variant, const char **error); +llmodel_model llmodel_model_create2(const char *model_path, const char *backend, const char **error); /** * Destroy a llmodel instance. diff --git a/gpt4all-bindings/python/README.md b/gpt4all-bindings/python/README.md index c380d169..604beb90 100644 --- a/gpt4all-bindings/python/README.md +++ b/gpt4all-bindings/python/README.md @@ -23,9 +23,9 @@ As an alternative to downloading via pip, you may build the Python bindings from ### Prerequisites -On Windows and Linux, building GPT4All requires the complete Vulkan SDK. You may download it from here: https://vulkan.lunarg.com/sdk/home +You will need a compiler. On Windows, you should install Visual Studio with the C++ Development components. On macOS, you will need the full version of Xcode—Xcode Command Line Tools lacks certain required tools. On Linux, you will need a GCC or Clang toolchain with C++ support. -macOS users do not need Vulkan, as GPT4All will use Metal instead. +On Windows and Linux, building GPT4All with full GPU support requires the [Vulkan SDK](https://vulkan.lunarg.com/sdk/home) and the latest [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads). 
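With the backend-aware bindings described above, the `device` argument of `GPT4All(...)` selects the compute backend at load time, and `GPT4All.list_gpus()` reports every device the installed GPU backends can see (each entry is prefixed with its backend). A minimal sketch of the new selection behaviour; the model filename below is only a placeholder for any GGUF model already present locally:

```python
from gpt4all import GPT4All

# Devices are reported as "<backend>:<device name>", e.g. "cuda:..." or "kompute:...".
print(GPT4All.list_gpus())

# "cuda" and "kompute" pick the best GPU offered by that backend, "gpu" keeps the
# old vendor-agnostic behaviour, and a full entry from list_gpus() selects a
# specific device. The filename here is a placeholder, not a real model name.
model = GPT4All("example-model.Q4_0.gguf", device="cuda", allow_download=False)

print(model.backend)  # one of "cpu", "kompute", "cuda", or "metal"
print(model.device)   # name of the GPU in use, or None when running on the CPU
```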
### Building the python bindings diff --git a/gpt4all-bindings/python/gpt4all/_pyllmodel.py b/gpt4all-bindings/python/gpt4all/_pyllmodel.py index ce2122eb..f5987c36 100644 --- a/gpt4all-bindings/python/gpt4all/_pyllmodel.py +++ b/gpt4all-bindings/python/gpt4all/_pyllmodel.py @@ -71,6 +71,7 @@ class LLModelPromptContext(ctypes.Structure): class LLModelGPUDevice(ctypes.Structure): _fields_ = [ + ("backend", ctypes.c_char_p), ("index", ctypes.c_int32), ("type", ctypes.c_int32), ("heapSize", ctypes.c_size_t), @@ -200,9 +201,11 @@ class LLModel: Maximum size of context window ngl : int Number of GPU layers to use (Vulkan) + backend : str + Backend to use. One of 'auto', 'cpu', 'metal', 'kompute', or 'cuda'. """ - def __init__(self, model_path: str, n_ctx: int, ngl: int): + def __init__(self, model_path: str, n_ctx: int, ngl: int, backend: str): self.model_path = model_path.encode() self.n_ctx = n_ctx self.ngl = ngl @@ -212,7 +215,7 @@ class LLModel: # Construct a model implementation err = ctypes.c_char_p() - model = llmodel.llmodel_model_create2(self.model_path, b"auto", ctypes.byref(err)) + model = llmodel.llmodel_model_create2(self.model_path, backend.encode(), ctypes.byref(err)) if model is None: s = err.value raise RuntimeError(f"Unable to instantiate model: {'null' if s is None else s.decode()}") @@ -231,7 +234,7 @@ class LLModel: raise ValueError("Attempted operation on a closed LLModel") @property - def backend(self) -> Literal["cpu", "kompute", "metal"]: + def backend(self) -> Literal["cpu", "kompute", "cuda", "metal"]: if self.model is None: self._raise_closed() return llmodel.llmodel_model_backend_name(self.model).decode() @@ -258,7 +261,7 @@ class LLModel: devices_ptr = llmodel.llmodel_available_gpu_devices(mem_required, ctypes.byref(num_devices)) if not devices_ptr: raise ValueError("Unable to retrieve available GPU devices") - return [d.name.decode() for d in devices_ptr[:num_devices.value]] + return [f'{d.backend.decode()}:{d.name.decode()}' for d in devices_ptr[:num_devices.value]] def init_gpu(self, device: str): if self.model is None: diff --git a/gpt4all-bindings/python/gpt4all/gpt4all.py b/gpt4all-bindings/python/gpt4all/gpt4all.py index 6424fc53..af47c408 100644 --- a/gpt4all-bindings/python/gpt4all/gpt4all.py +++ b/gpt4all-bindings/python/gpt4all/gpt4all.py @@ -5,6 +5,7 @@ from __future__ import annotations import hashlib import os +import platform import re import sys import time @@ -44,7 +45,7 @@ class Embed4All: MIN_DIMENSIONALITY = 64 - def __init__(self, model_name: str | None = None, *, n_threads: int | None = None, device: str | None = "cpu", **kwargs: Any): + def __init__(self, model_name: str | None = None, *, n_threads: int | None = None, device: str | None = None, **kwargs: Any): """ Constructor @@ -172,7 +173,7 @@ class GPT4All: model_type: str | None = None, allow_download: bool = True, n_threads: int | None = None, - device: str | None = "cpu", + device: str | None = None, n_ctx: int = 2048, ngl: int = 100, verbose: bool = False, @@ -190,30 +191,56 @@ class GPT4All: n_threads: number of CPU threads used by GPT4All. Default is None, then the number of threads are determined automatically. device: The processing unit on which the GPT4All model will run. It can be set to: - "cpu": Model will run on the central processing unit. - - "gpu": Model will run on the best available graphics processing unit, irrespective of its vendor. - - "amd", "nvidia", "intel": Model will run on the best available GPU from the specified vendor. 
+ - "gpu": Use Metal on ARM64 macOS, otherwise the same as "kompute". + - "kompute": Use the best GPU provided by the Kompute backend. + - "cuda": Use the best GPU provided by the CUDA backend. + - "amd", "nvidia": Use the best GPU provided by the Kompute backend from this vendor. - A specific device name from the list returned by `GPT4All.list_gpus()`. - Default is "cpu". + Default is Metal on ARM64 macOS, "cpu" otherwise. Note: If a selected GPU device does not have sufficient RAM to accommodate the model, an error will be thrown, and the GPT4All instance will be rendered invalid. It's advised to ensure the device has enough memory before initiating the model. n_ctx: Maximum size of context window ngl: Number of GPU layers to use (Vulkan) verbose: If True, print debug messages. """ + self.model_type = model_type + self._history: list[MessageType] | None = None + self._current_prompt_template: str = "{0}" + + device_init = None + if sys.platform == 'darwin': + if device is None: + backend = 'auto' # 'auto' is effectively 'metal' due to currently non-functional fallback + elif device == 'cpu': + backend = 'cpu' + else: + if platform.machine() != 'arm64' or device != 'gpu': + raise ValueError(f'Unknown device for this platform: {device}') + backend = 'metal' + else: + backend = 'kompute' + if device is None or device == 'cpu': + pass # use kompute with no device + elif device in ('cuda', 'kompute'): + backend = device + device_init = 'gpu' + elif device.startswith('cuda:'): + backend = 'cuda' + device_init = device.removeprefix('cuda:') + else: + device_init = device.removeprefix('kompute:') + # Retrieve model and download if allowed self.config: ConfigType = self.retrieve_model(model_name, model_path=model_path, allow_download=allow_download, verbose=verbose) - self.model = LLModel(self.config["path"], n_ctx, ngl) - if device is not None and device != "cpu": - self.model.init_gpu(device) + self.model = LLModel(self.config["path"], n_ctx, ngl, backend) + if device_init is not None: + self.model.init_gpu(device_init) self.model.load_model() # Set n_threads if n_threads is not None: self.model.set_thread_count(n_threads) - self._history: list[MessageType] | None = None - self._current_prompt_template: str = "{0}" - def __enter__(self) -> Self: return self @@ -227,13 +254,13 @@ class GPT4All: self.model.close() @property - def backend(self) -> Literal["cpu", "kompute", "metal"]: - """The name of the llama.cpp backend currently in use. One of "cpu", "kompute", or "metal".""" + def backend(self) -> Literal["cpu", "kompute", "cuda", "metal"]: + """The name of the llama.cpp backend currently in use. 
One of "cpu", "kompute", "cuda", or "metal".""" return self.model.backend @property def device(self) -> str | None: - """The name of the GPU device currently in use, or None for backends other than Kompute.""" + """The name of the GPU device currently in use, or None for backends other than Kompute or CUDA.""" return self.model.device @property diff --git a/gpt4all-bindings/python/setup.py b/gpt4all-bindings/python/setup.py index 9e6a76ea..19e9d4bf 100644 --- a/gpt4all-bindings/python/setup.py +++ b/gpt4all-bindings/python/setup.py @@ -45,7 +45,7 @@ def copy_prebuilt_C_lib(src_dir, dest_dir, dest_build_dir): d = os.path.join(dest_dir, item) shutil.copy2(s, d) files_copied += 1 - if item.endswith(lib_ext) or item.endswith('.metal'): + if item.endswith(lib_ext) or item.endswith('.metallib'): s = os.path.join(dirpath, item) d = os.path.join(dest_build_dir, item) shutil.copy2(s, d) @@ -68,7 +68,7 @@ def get_long_description(): setup( name=package_name, - version="2.6.0", + version="2.7.0", description="Python bindings for GPT4All", long_description=get_long_description(), long_description_content_type="text/markdown", diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt index 27173862..cb3519d2 100644 --- a/gpt4all-chat/CMakeLists.txt +++ b/gpt4all-chat/CMakeLists.txt @@ -17,8 +17,8 @@ if(APPLE) endif() set(APP_VERSION_MAJOR 2) -set(APP_VERSION_MINOR 7) -set(APP_VERSION_PATCH 6) +set(APP_VERSION_MINOR 8) +set(APP_VERSION_PATCH 0) set(APP_VERSION "${APP_VERSION_MAJOR}.${APP_VERSION_MINOR}.${APP_VERSION_PATCH}") # Include the binary directory for the generated header file @@ -65,7 +65,7 @@ add_subdirectory(../gpt4all-backend llmodel) set(METAL_SHADER_FILE) if(${CMAKE_SYSTEM_NAME} MATCHES Darwin) - set(METAL_SHADER_FILE ../gpt4all-backend/llama.cpp-mainline/ggml-metal.metal) + set(METAL_SHADER_FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib) endif() set(APP_ICON_RESOURCE) @@ -185,7 +185,6 @@ if(METAL_SHADER_FILE) set_target_properties(chat PROPERTIES RESOURCE ${METAL_SHADER_FILE} ) - configure_file(${METAL_SHADER_FILE} bin/ggml-metal.metal COPYONLY) endif() target_compile_definitions(chat @@ -207,18 +206,61 @@ if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) endif() install(TARGETS chat DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN}) -install(TARGETS llmodel DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN}) + +install( + TARGETS llmodel + LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib + RUNTIME DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN} # .dll +) # We should probably iterate through the list of the cmake for backend, but these need to be installed # to the this component's dir for the finicky qt installer to work -install(TARGETS gptj-avxonly DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN}) -install(TARGETS gptj-default DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN}) -install(TARGETS llama-mainline-avxonly DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN}) -install(TARGETS llama-mainline-default DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN}) -install(TARGETS llamamodel-mainline-avxonly DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN}) -install(TARGETS llamamodel-mainline-default DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN}) -if(APPLE) -install(TARGETS llamamodel-mainline-metal DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN}) +if (LLMODEL_KOMPUTE) + set(MODEL_IMPL_TARGETS + llamamodel-mainline-kompute + llamamodel-mainline-kompute-avxonly + gptj-kompute + gptj-kompute-avxonly + ) +else() + set(MODEL_IMPL_TARGETS + 
llamamodel-mainline-cpu + llamamodel-mainline-cpu-avxonly + gptj-cpu + gptj-cpu-avxonly + ) +endif() + +if (APPLE) + list(APPEND MODEL_IMPL_TARGETS llamamodel-mainline-metal) +endif() + +install( + TARGETS ${MODEL_IMPL_TARGETS} + LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib + RUNTIME DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .dll +) + +if (LLMODEL_CUDA) + set_property(TARGET llamamodel-mainline-cuda llamamodel-mainline-cuda-avxonly + APPEND PROPERTY INSTALL_RPATH "$ORIGIN") + + install( + TARGETS llamamodel-mainline-cuda + llamamodel-mainline-cuda-avxonly + RUNTIME_DEPENDENCY_SET llama-cuda-deps + LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib + RUNTIME DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .dll + ) + if (WIN32) + install( + RUNTIME_DEPENDENCY_SET llama-cuda-deps + PRE_EXCLUDE_REGEXES "^(nvcuda|api-ms-.*)\\.dll$" + POST_INCLUDE_REGEXES "(^|[/\\\\])(lib)?(cuda|cublas)" POST_EXCLUDE_REGEXES . + DIRECTORIES "${CUDAToolkit_BIN_DIR}" + DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} + ) + endif() endif() set(CPACK_GENERATOR "IFW") diff --git a/gpt4all-chat/build_and_run.md b/gpt4all-chat/build_and_run.md index 25d28c7f..16f0f8fb 100644 --- a/gpt4all-chat/build_and_run.md +++ b/gpt4all-chat/build_and_run.md @@ -6,9 +6,9 @@ gpt4all-chat from source. ## Prerequisites -On Windows and Linux, building GPT4All requires the complete Vulkan SDK. You may download it from here: https://vulkan.lunarg.com/sdk/home +You will need a compiler. On Windows, you should install Visual Studio with the C++ Development components. On macOS, you will need the full version of Xcode—Xcode Command Line Tools lacks certain required tools. On Linux, you will need a GCC or Clang toolchain with C++ support. -macOS users do not need Vulkan, as GPT4All will use Metal instead. +On Windows and Linux, building GPT4All with full GPU support requires the [Vulkan SDK](https://vulkan.lunarg.com/sdk/home) and the latest [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads). 
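The `LLMODEL_KOMPUTE` and `LLMODEL_CUDA` CMake options referenced in the CMakeLists changes above control which GPU model-implementation targets are installed with the chat app. As an optional sanity check that a GPU-enabled build actually sees your hardware, the Python bindings built from the same checkout can enumerate the devices each backend exposes; a small sketch, assuming those bindings are installed:

```python
from gpt4all import GPT4All  # assumes the Python bindings from this repo are installed

# Each entry is prefixed with the backend exposing the device, so an NVIDIA GPU
# should show up under both "kompute:..." and "cuda:..." in a full build, while
# AMD and Intel GPUs appear only under "kompute:...".
for gpu in GPT4All.list_gpus():
    print(gpu)
```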
## Note for Linux users diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp index 333cdaa9..999c233d 100644 --- a/gpt4all-chat/chatllm.cpp +++ b/gpt4all-chat/chatllm.cpp @@ -143,7 +143,7 @@ void ChatLLM::handleThreadStarted() void ChatLLM::handleForceMetalChanged(bool forceMetal) { -#if defined(Q_OS_MAC) && defined(__arm__) +#if defined(Q_OS_MAC) && defined(__aarch64__) m_forceMetal = forceMetal; if (isModelLoaded() && m_shouldBeLoaded) { m_reloadingToChangeVariant = true; @@ -324,19 +324,29 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo) QElapsedTimer modelLoadTimer; modelLoadTimer.start(); + auto requestedDevice = MySettings::globalInstance()->device(); auto n_ctx = MySettings::globalInstance()->modelContextLength(modelInfo); m_ctx.n_ctx = n_ctx; auto ngl = MySettings::globalInstance()->modelGpuLayers(modelInfo); - std::string buildVariant = "auto"; -#if defined(Q_OS_MAC) && defined(__arm__) - if (m_forceMetal) - buildVariant = "metal"; + std::string backend = "auto"; +#ifdef Q_OS_MAC + if (requestedDevice == "CPU") { + backend = "cpu"; + } else if (m_forceMetal) { +#ifdef __aarch64__ + backend = "metal"; #endif + } +#else // !defined(Q_OS_MAC) + if (requestedDevice.startsWith("CUDA: ")) + backend = "cuda"; +#endif + QString constructError; m_llModelInfo.model.reset(); try { - auto *model = LLModel::Implementation::construct(filePath.toStdString(), buildVariant, n_ctx); + auto *model = LLModel::Implementation::construct(filePath.toStdString(), backend, n_ctx); m_llModelInfo.model.reset(model); } catch (const LLModel::MissingImplementationError &e) { modelLoadProps.insert("error", "missing_model_impl"); @@ -378,6 +388,8 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo) { const size_t requiredMemory = m_llModelInfo.model->requiredMem(filePath.toStdString(), n_ctx, ngl); availableDevices = m_llModelInfo.model->availableGPUDevices(requiredMemory); + // Pick the best device + // NB: relies on the fact that Kompute devices are listed first if (!availableDevices.empty() && availableDevices.front().type == 2 /*a discrete gpu*/) { defaultDevice = &availableDevices.front(); float memGB = defaultDevice->heapSize / float(1024 * 1024 * 1024); @@ -387,16 +399,18 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo) } } - const QString requestedDevice = MySettings::globalInstance()->device(); - bool isMetal = m_llModelInfo.model->implementation().buildVariant() == "metal"; + QString actualDevice("CPU"); - // Pick the best match for the device - QString actualDevice = isMetal ? "Metal" : "CPU"; - if (!isMetal && requestedDevice != "CPU") { +#if defined(Q_OS_MAC) && defined(__aarch64__) + if (m_llModelInfo.model->implementation().buildVariant() == "metal") + actualDevice = "Metal"; +#else + if (requestedDevice != "CPU") { const auto *device = defaultDevice; if (requestedDevice != "Auto") { + // Use the selected device for (const LLModel::GPUDevice &d : availableDevices) { - if (QString::fromStdString(d.name) == requestedDevice) { + if (QString::fromStdString(d.selectionName()) == requestedDevice) { device = &d; break; } @@ -409,14 +423,14 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo) } else if (!m_llModelInfo.model->initializeGPUDevice(device->index, &unavail_reason)) { emit reportFallbackReason(QString::fromStdString("
" + unavail_reason)); } else { - actualDevice = QString::fromStdString(device->name); + actualDevice = QString::fromStdString(device->reportedName()); modelLoadProps.insert("requested_device_mem", approxDeviceMemGB(device)); } } +#endif // Report which device we're actually using emit reportDevice(actualDevice); - bool success = m_llModelInfo.model->loadModel(filePath.toStdString(), n_ctx, ngl); if (!m_shouldBeLoaded) { diff --git a/gpt4all-chat/cmake/deploy-qt-linux.cmake.in b/gpt4all-chat/cmake/deploy-qt-linux.cmake.in index b560736f..97a2a4a9 100644 --- a/gpt4all-chat/cmake/deploy-qt-linux.cmake.in +++ b/gpt4all-chat/cmake/deploy-qt-linux.cmake.in @@ -5,10 +5,7 @@ set(DATA_DIR ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN set(BIN_DIR ${DATA_DIR}/bin) set(Qt6_ROOT_DIR "@Qt6_ROOT_DIR@") set(ENV{LD_LIBRARY_PATH} "${BIN_DIR}:${Qt6_ROOT_DIR}/../lib/") -execute_process(COMMAND ${LINUXDEPLOYQT} ${BIN_DIR}/chat -qmldir=${CMAKE_CURRENT_SOURCE_DIR} -bundle-non-qt-libs -qmake=${Qt6_ROOT_DIR}/bin/qmake -verbose=2) -file(GLOB MYLLMODELLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/*llmodel.*) -file(COPY ${MYLLMODELLIBS} - DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin) +execute_process(COMMAND ${LINUXDEPLOYQT} ${BIN_DIR}/chat -qmldir=${CMAKE_CURRENT_SOURCE_DIR} -bundle-non-qt-libs -qmake=${Qt6_ROOT_DIR}/bin/qmake -verbose=2 -exclude-libs=libcuda.so.1) file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/logo-32.png" DESTINATION ${DATA_DIR}) file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/logo-48.png" diff --git a/gpt4all-chat/cmake/deploy-qt-mac.cmake.in b/gpt4all-chat/cmake/deploy-qt-mac.cmake.in index 89d6f42c..d4a637db 100644 --- a/gpt4all-chat/cmake/deploy-qt-mac.cmake.in +++ b/gpt4all-chat/cmake/deploy-qt-mac.cmake.in @@ -4,14 +4,11 @@ set(CMAKE_CURRENT_SOURCE_DIR "@CMAKE_CURRENT_SOURCE_DIR@") execute_process(COMMAND ${MACDEPLOYQT} ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app -qmldir=${CMAKE_CURRENT_SOURCE_DIR} -verbose=2) file(GLOB MYGPTJLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libgptj*) file(GLOB MYLLAMALIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libllama*) -file(GLOB MYBERTLLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libbert*) file(GLOB MYLLMODELLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libllmodel.*) file(COPY ${MYGPTJLIBS} DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks) file(COPY ${MYLLAMALIBS} DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks) -file(COPY ${MYBERTLLIBS} - DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks) file(COPY ${MYLLMODELLIBS} DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks) file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/logo-32.png" diff --git a/gpt4all-chat/cmake/deploy-qt-windows.cmake.in b/gpt4all-chat/cmake/deploy-qt-windows.cmake.in index df1f9b0c..7859474a 100644 --- a/gpt4all-chat/cmake/deploy-qt-windows.cmake.in +++ b/gpt4all-chat/cmake/deploy-qt-windows.cmake.in @@ -2,9 +2,6 @@ set(WINDEPLOYQT "@WINDEPLOYQT@") set(COMPONENT_NAME_MAIN "@COMPONENT_NAME_MAIN@") 
set(CMAKE_CURRENT_SOURCE_DIR "@CMAKE_CURRENT_SOURCE_DIR@") execute_process(COMMAND ${WINDEPLOYQT} --qmldir ${CMAKE_CURRENT_SOURCE_DIR} ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin) -file(GLOB MYLLMODELLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/*llmodel.*) -file(COPY ${MYLLMODELLIBS} - DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin) file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/logo-32.png" DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data) file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/logo-48.png" diff --git a/gpt4all-chat/mysettings.cpp b/gpt4all-chat/mysettings.cpp index 3feaea21..d5e72633 100644 --- a/gpt4all-chat/mysettings.cpp +++ b/gpt4all-chat/mysettings.cpp @@ -65,10 +65,14 @@ MySettings::MySettings() { QSettings::setDefaultFormat(QSettings::IniFormat); - std::vector devices = LLModel::Implementation::availableGPUDevices(); QVector deviceList{ "Auto" }; +#if defined(Q_OS_MAC) && defined(__aarch64__) + deviceList << "Metal"; +#else + std::vector devices = LLModel::Implementation::availableGPUDevices(); for (LLModel::GPUDevice &d : devices) - deviceList << QString::fromStdString(d.name); + deviceList << QString::fromStdString(d.selectionName()); +#endif deviceList << "CPU"; setDeviceList(deviceList); } @@ -786,7 +790,23 @@ QString MySettings::device() const { QSettings setting; setting.sync(); - return setting.value("device", default_device).toString(); + auto value = setting.value("device"); + if (!value.isValid()) + return default_device; + + auto device = value.toString(); + if (!device.isEmpty()) { + auto deviceStr = device.toStdString(); + auto newNameStr = LLModel::GPUDevice::updateSelectionName(deviceStr); + if (newNameStr != deviceStr) { + auto newName = QString::fromStdString(newNameStr); + qWarning() << "updating device name:" << device << "->" << newName; + device = newName; + setting.setValue("device", device); + setting.sync(); + } + } + return device; } void MySettings::setDevice(const QString &u)