llama-cpp: embed (don't pre-compile) metal shaders
Port of https://github.com/ggerganov/llama.cpp/pull/6118, although compiling the shaders with Xcode is left disabled, as it requires disabling the sandbox (and only works on macOS anyway).
parent 7aa588cc96
commit e1ef3aaacc
1 changed file with 4 additions and 1 deletion
@@ -131,7 +131,10 @@ effectiveStdenv.mkDerivation (finalAttrs: {
       # Should likely use `rocmPackages.clr.gpuTargets`.
       "-DAMDGPU_TARGETS=gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102"
     ]
-    ++ optionals metalSupport [ (cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1") ];
+    ++ optionals metalSupport [
+      (cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1")
+      (cmakeBool "LLAMA_METAL_EMBED_LIBRARY" true)
+    ];
 
     # upstream plans on adding targets at the cmakelevel, remove those
     # additional steps after that
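
A minimal, hypothetical sketch (not part of this commit) of how a downstream overlay could flip the new flag with overrideAttrs, assuming nixpkgs' llama-cpp keeps its cmakeFlags structured as in the diff above. lib.cmakeBool is the same helper used there, and a later -D definition for the same CMake variable should take precedence on the cmake command line.

# Hypothetical overlay sketch; attribute names assumed from the diff above.
final: prev: {
  llama-cpp = prev.llama-cpp.overrideAttrs (old: {
    # Append another -D for the same variable so the last one wins, e.g. to
    # go back to pre-compiled shaders (which, per the commit message, would
    # also need Xcode and a relaxed sandbox).
    cmakeFlags = (old.cmakeFlags or [ ]) ++ [
      (prev.lib.cmakeBool "LLAMA_METAL_EMBED_LIBRARY" false)
    ];
  });
}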