llama-cpp: 3091 -> 3260

Lan Tian 2024-06-29 01:54:28 -07:00
parent ce1b2a11a8
commit 134743c02a
GPG Key ID: 04E66B6B25A0862B (no known key found for this signature in database)

@@ -71,13 +71,13 @@ let
 in
 effectiveStdenv.mkDerivation (finalAttrs: {
   pname = "llama-cpp";
-  version = "3091";
+  version = "3260";
   src = fetchFromGitHub {
     owner = "ggerganov";
     repo = "llama.cpp";
     rev = "refs/tags/b${finalAttrs.version}";
-    hash = "sha256-ppujag6Nrk/M9QMQ4mYe2iADsfKzmfKtOP8Ib7GZBmk=";
+    hash = "sha256-0KVwSzxfGinpv5KkDCgF2J+1ijDv87PlDrC+ldscP6s=";
     leaveDotGit = true;
     postFetch = ''
       git -C "$out" rev-parse --short HEAD > $out/COMMIT
@@ -86,12 +86,12 @@ effectiveStdenv.mkDerivation (finalAttrs: {
   };
   postPatch = ''
-    substituteInPlace ./ggml-metal.m \
+    substituteInPlace ./ggml/src/ggml-metal.m \
       --replace-fail '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
-    substituteInPlace ./scripts/build-info.cmake \
-      --replace-fail 'set(BUILD_NUMBER 0)' 'set(BUILD_NUMBER ${finalAttrs.version})' \
-      --replace-fail 'set(BUILD_COMMIT "unknown")' "set(BUILD_COMMIT \"$(cat COMMIT)\")"
+    substituteInPlace ./scripts/build-info.sh \
+      --replace-fail 'build_number="0"' 'build_number="${finalAttrs.version}"' \
+      --replace-fail 'build_commit="unknown"' "build_commit=\"$(cat COMMIT)\""
   '';
   nativeBuildInputs = [ cmake ninja pkg-config git ]
@@ -109,17 +109,16 @@ effectiveStdenv.mkDerivation (finalAttrs: {
   cmakeFlags = [
     # -march=native is non-deterministic; override with platform-specific flags if needed
-    (cmakeBool "LLAMA_NATIVE" false)
-    (cmakeBool "BUILD_SHARED_SERVER" true)
-    (cmakeBool "BUILD_SHARED_LIBS" true)
-    (cmakeBool "BUILD_SHARED_LIBS" true)
-    (cmakeBool "LLAMA_BLAS" blasSupport)
-    (cmakeBool "LLAMA_CLBLAST" openclSupport)
-    (cmakeBool "LLAMA_CUDA" cudaSupport)
-    (cmakeBool "LLAMA_HIPBLAS" rocmSupport)
-    (cmakeBool "LLAMA_METAL" metalSupport)
-    (cmakeBool "LLAMA_RPC" rpcSupport)
-    (cmakeBool "LLAMA_VULKAN" vulkanSupport)
+    (cmakeBool "GGML_NATIVE" false)
+    (cmakeBool "LLAMA_BUILD_SERVER" true)
+    (cmakeBool "BUILD_SHARED_LIBS" true)
+    (cmakeBool "GGML_BLAS" blasSupport)
+    (cmakeBool "GGML_CLBLAST" openclSupport)
+    (cmakeBool "GGML_CUDA" cudaSupport)
+    (cmakeBool "GGML_HIPBLAS" rocmSupport)
+    (cmakeBool "GGML_METAL" metalSupport)
+    (cmakeBool "GGML_RPC" rpcSupport)
+    (cmakeBool "GGML_VULKAN" vulkanSupport)
   ]
   ++ optionals cudaSupport [
     (cmakeFeature "CMAKE_CUDA_ARCHITECTURES" cudaPackages.flags.cmakeCudaArchitecturesString)
@@ -138,7 +137,6 @@ effectiveStdenv.mkDerivation (finalAttrs: {
     (cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1")
     (cmakeBool "LLAMA_METAL_EMBED_LIBRARY" true)
   ] ++ optionals rpcSupport [
-    "-DLLAMA_RPC=ON"
     # This is done so we can move rpc-server out of bin because llama.cpp doesn't
     # install rpc-server in their install target.
     "-DCMAKE_SKIP_BUILD_RPATH=ON"
@@ -147,10 +145,11 @@ effectiveStdenv.mkDerivation (finalAttrs: {
   # upstream plans on adding targets at the cmakelevel, remove those
   # additional steps after that
   postInstall = ''
-    mv $out/bin/main $out/bin/llama
-    mv $out/bin/server $out/bin/llama-server
+    # Match previous binary name for this package
+    ln -sf $out/bin/llama-cli $out/bin/llama
     mkdir -p $out/include
-    cp $src/llama.h $out/include/
+    cp $src/include/llama.h $out/include/
   '' + optionalString rpcSupport "cp bin/rpc-server $out/bin/llama-rpc-server";
   passthru.updateScript = nix-update-script {
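
The update renames most feature toggles from LLAMA_* to GGML_* CMake options, but they are still driven by the same Nix arguments (cudaSupport, vulkanSupport, and so on) via cmakeBool. A minimal consumer-side sketch, assuming those arguments are exposed by the expression and that the package is reachable as llama-cpp in an overlay; the overlay shape and attribute names here are illustrative, not part of this commit:

# Hypothetical overlay: flip the arguments that feed the GGML_* cmakeBool flags above.
final: prev: {
  llama-cpp = prev.llama-cpp.override {
    cudaSupport = true;    # drives (cmakeBool "GGML_CUDA" cudaSupport) above
    vulkanSupport = false; # drives (cmakeBool "GGML_VULKAN" vulkanSupport) above
  };
}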