{ lib
, autoAddDriverRunpath
, cmake
, darwin
, fetchFromGitHub
, nix-update-script
, stdenv

, config
, cudaSupport ? config.cudaSupport
, cudaPackages ? { }

, rocmSupport ? config.rocmSupport
, rocmPackages ? { }

, openclSupport ? false
, clblast

  # Fall back to a CPU BLAS backend only when no accelerated backend is enabled.
, blasSupport ? builtins.all (x: !x) [ cudaSupport metalSupport openclSupport rocmSupport vulkanSupport ]
, blas

, pkg-config
, metalSupport ? stdenv.isDarwin && stdenv.isAarch64 && !openclSupport
, vulkanSupport ? false
, mpiSupport ? false # Increases the runtime closure by ~700M
, vulkan-headers
, vulkan-loader
, ninja
, git
, mpi
}:

let
  # It's necessary to consistently use backendStdenv when building with CUDA support,
  # otherwise we get libstdc++ errors downstream.
  # cuda imposes an upper bound on the gcc version, e.g. the latest gcc compatible with cudaPackages_11 is gcc11
  effectiveStdenv = if cudaSupport then cudaPackages.backendStdenv else stdenv;
  inherit (lib) cmakeBool cmakeFeature optionals;

  darwinBuildInputs =
    with darwin.apple_sdk.frameworks;
    [
      Accelerate
      CoreVideo
      CoreGraphics
    ]
    ++ optionals metalSupport [ MetalKit ];

  cudaBuildInputs = with cudaPackages; [
    cuda_cccl.dev # <nv/target>

    # A temporary hack for reducing the closure size, remove once cudaPackages
    # have stopped using lndir: https://github.com/NixOS/nixpkgs/issues/271792
    cuda_cudart.dev
    cuda_cudart.lib
    cuda_cudart.static
    libcublas.dev
    libcublas.lib
    libcublas.static
  ];

  rocmBuildInputs = with rocmPackages; [
    clr
    hipblas
    rocblas
  ];

  vulkanBuildInputs = [
    vulkan-headers
    vulkan-loader
  ];
in
effectiveStdenv.mkDerivation (finalAttrs: {
  pname = "llama-cpp";
  version = "2746";

  src = fetchFromGitHub {
    owner = "ggerganov";
    repo = "llama.cpp";
    # Upstream tags releases as "b<build number>".
    rev = "refs/tags/b${finalAttrs.version}";
    hash = "sha256-KrIeZEq6RAz3N47wgtQjlfNzoGcTh3DqOhYBOxJPGzs=";
    # Keep .git long enough to record the short commit hash, which
    # build-info.cmake is patched to consume below; strip it afterwards
    # so the fixed-output hash stays reproducible.
    leaveDotGit = true;
    postFetch = ''
      git -C "$out" rev-parse --short HEAD > $out/COMMIT
      find "$out" -name .git -print0 | xargs -0 rm -rf
    '';
  };

  postPatch = ''
    substituteInPlace ./ggml-metal.m \
      --replace-fail '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"

    substituteInPlace ./scripts/build-info.cmake \
      --replace-fail 'set(BUILD_NUMBER 0)' 'set(BUILD_NUMBER ${finalAttrs.version})' \
      --replace-fail 'set(BUILD_COMMIT "unknown")' "set(BUILD_COMMIT \"$(cat COMMIT)\")"
  '';

  nativeBuildInputs = [ cmake ninja pkg-config git ]
    ++ optionals cudaSupport [
      cudaPackages.cuda_nvcc
      autoAddDriverRunpath
    ];

  buildInputs = optionals effectiveStdenv.isDarwin darwinBuildInputs
    ++ optionals cudaSupport cudaBuildInputs
    ++ optionals mpiSupport [ mpi ]
    ++ optionals openclSupport [ clblast ]
    ++ optionals rocmSupport rocmBuildInputs
    ++ optionals blasSupport [ blas ]
    ++ optionals vulkanSupport vulkanBuildInputs;

  cmakeFlags = [
    # -march=native is non-deterministic; override with platform-specific flags if needed
    (cmakeBool "LLAMA_NATIVE" false)
    (cmakeBool "BUILD_SHARED_SERVER" true)
    (cmakeBool "BUILD_SHARED_LIBS" true)
    (cmakeBool "LLAMA_BLAS" blasSupport)
    (cmakeBool "LLAMA_CLBLAST" openclSupport)
    (cmakeBool "LLAMA_CUDA" cudaSupport)
    (cmakeBool "LLAMA_HIPBLAS" rocmSupport)
    (cmakeBool "LLAMA_METAL" metalSupport)
    (cmakeBool "LLAMA_MPI" mpiSupport)
    (cmakeBool "LLAMA_VULKAN" vulkanSupport)
  ]
  ++ optionals cudaSupport [
    (
      with cudaPackages.flags;
      cmakeFeature "CMAKE_CUDA_ARCHITECTURES" (
        builtins.concatStringsSep ";" (map dropDot cudaCapabilities)
      )
    )
  ]
  ++ optionals rocmSupport [
    (cmakeFeature "CMAKE_C_COMPILER" "hipcc")
    (cmakeFeature "CMAKE_CXX_COMPILER" "hipcc")

    # Build all targets supported by rocBLAS. When updating search for TARGET_LIST_ROCM
    # in https://github.com/ROCmSoftwarePlatform/rocBLAS/blob/develop/CMakeLists.txt
    # and select the line that matches the current nixpkgs version of rocBLAS.
    # Should likely use `rocmPackages.clr.gpuTargets`.
    "-DAMDGPU_TARGETS=gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102"
  ]
  ++ optionals metalSupport [
    (cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1")
    (cmakeBool "LLAMA_METAL_EMBED_LIBRARY" true)
  ];

  # upstream plans on adding targets at the cmakelevel, remove those
  # additional steps after that
  postInstall = ''
    mv $out/bin/main $out/bin/llama
    mv $out/bin/server $out/bin/llama-server
    mkdir -p $out/include
    cp $src/llama.h $out/include/
  '';

  passthru.updateScript = nix-update-script {
    attrPath = "llama-cpp";
    # Strip the "b" prefix from upstream's tag names to get the version.
    extraArgs = [ "--version-regex" "b(.*)" ];
  };

  meta = with lib; {
    description = "Port of Facebook's LLaMA model in C/C++";
    homepage = "https://github.com/ggerganov/llama.cpp/";
    license = licenses.mit;
    mainProgram = "llama";
    maintainers = with maintainers; [ dit7ya elohmeier philiptaron ];
    platforms = platforms.unix;
    badPlatforms = optionals (cudaSupport || openclSupport) lib.platforms.darwin;
    broken = (metalSupport && !effectiveStdenv.isDarwin);
  };
})