gnu: llama-cpp: Update commit and configure flags for shared library build.
* gnu/packages/machine-learning.scm (llama-cpp): Update to commit a5735e with pkg-config support. [arguments](configure-flags): Add cmake configure flag for shared library build and adjust arguments to make openblas found by cmake. (phases) 'install-python-scripts: Remove references to deleted scripts and add new ones from upstream. Change-Id: I7c4bc219a22aa9a949e811b340c7cf745b176d14 Signed-off-by: Ludovic Courtès <ludo@gnu.org>
parent
e0b0e3dd07
commit
85f1bfac0b
|
@ -549,8 +549,8 @@ Performance is achieved by using the LLVM JIT compiler.")
|
||||||
(deprecated-package "guile-aiscm-next" guile-aiscm))
|
(deprecated-package "guile-aiscm-next" guile-aiscm))
|
||||||
|
|
||||||
(define-public llama-cpp
|
(define-public llama-cpp
|
||||||
(let ((commit "fed0108491a3a3cbec6c6480dc8667ffff9d7659")
|
(let ((commit "a5735e4426b19a3ebd0c653ad8ac01420458ee95")
|
||||||
(revision "2"))
|
(revision "3"))
|
||||||
(package
|
(package
|
||||||
(name "llama-cpp")
|
(name "llama-cpp")
|
||||||
(version (git-version "0.0.0" revision commit))
|
(version (git-version "0.0.0" revision commit))
|
||||||
|
@ -562,19 +562,27 @@ Performance is achieved by using the LLVM JIT compiler.")
|
||||||
(commit commit)))
|
(commit commit)))
|
||||||
(file-name (git-file-name name version))
|
(file-name (git-file-name name version))
|
||||||
(sha256
|
(sha256
|
||||||
(base32 "16rm9gy0chd6k07crm8rkl2j3hg7y7h0km7k6c8q7bmm2jrd64la"))))
|
(base32 "0nx55wchwf204ld6jygfn37cjrzc4lspwn5v0qk8i6p92499bv0h"))))
|
||||||
(build-system cmake-build-system)
|
(build-system cmake-build-system)
|
||||||
(arguments
|
(arguments
|
||||||
(list
|
(list
|
||||||
#:configure-flags #~'("-DLLAMA_BLAS=ON"
|
#:configure-flags
|
||||||
"-DLLAMA_BLAS_VENDOR=OpenBLAS"
|
#~(list "-DBUILD_SHARED_LIBS=ON"
|
||||||
|
"-DLLAMA_BLAS=ON"
|
||||||
|
"-DLLAMA_BLAS_VENDOR=OpenBLAS"
|
||||||
|
(string-append "-DBLAS_INCLUDE_DIRS="
|
||||||
|
#$(this-package-input "openblas")
|
||||||
|
"/include")
|
||||||
|
(string-append "-DBLAS_LIBRARIES="
|
||||||
|
#$(this-package-input "openblas")
|
||||||
|
"/lib/libopenblas.so")
|
||||||
|
|
||||||
"-DLLAMA_NATIVE=OFF" ;no '-march=native'
|
"-DLLAMA_NATIVE=OFF" ;no '-march=native'
|
||||||
"-DLLAMA_FMA=OFF" ;and no '-mfma', etc.
|
"-DLLAMA_FMA=OFF" ;and no '-mfma', etc.
|
||||||
"-DLLAMA_AVX2=OFF"
|
"-DLLAMA_AVX2=OFF"
|
||||||
"-DLLAMA_AVX512=OFF"
|
"-DLLAMA_AVX512=OFF"
|
||||||
"-DLLAMA_AVX512_VBMI=OFF"
|
"-DLLAMA_AVX512_VBMI=OFF"
|
||||||
"-DLLAMA_AVX512_VNNI=OFF")
|
"-DLLAMA_AVX512_VNNI=OFF")
|
||||||
|
|
||||||
#:modules '((ice-9 textual-ports)
|
#:modules '((ice-9 textual-ports)
|
||||||
(guix build utils)
|
(guix build utils)
|
||||||
|
@ -609,14 +617,14 @@ Performance is achieved by using the LLVM JIT compiler.")
|
||||||
(mkdir-p bin)
|
(mkdir-p bin)
|
||||||
(make-script "convert-hf-to-gguf")
|
(make-script "convert-hf-to-gguf")
|
||||||
(make-script "convert-llama-ggml-to-gguf")
|
(make-script "convert-llama-ggml-to-gguf")
|
||||||
(make-script "convert-lora-to-ggml")
|
(make-script "convert-hf-to-gguf-update.py"))))
|
||||||
(make-script "convert-persimmon-to-gguf")
|
|
||||||
(make-script "convert"))))
|
|
||||||
(add-after 'install-python-scripts 'wrap-python-scripts
|
(add-after 'install-python-scripts 'wrap-python-scripts
|
||||||
(assoc-ref python:%standard-phases 'wrap))
|
(assoc-ref python:%standard-phases 'wrap))
|
||||||
(add-after 'install 'install-main
|
(add-after 'install 'install-main
|
||||||
(lambda _
|
(lambda _
|
||||||
(copy-file "bin/main" (string-append #$output "/bin/llama")))))))
|
(with-directory-excursion (string-append #$output "/bin")
|
||||||
|
(symlink "main" "llama"))))
|
||||||
|
)))
|
||||||
(inputs (list python))
|
(inputs (list python))
|
||||||
(native-inputs (list pkg-config))
|
(native-inputs (list pkg-config))
|
||||||
(propagated-inputs
|
(propagated-inputs
|
||||||
|
|
Reference in New Issue