[PATCH 0/2] gnu: llama-cpp: Update to 0.0.0-b4882.

  • Open
  • quality assurance status badge
Details
2 participants
  • Morgan Smith
  • Christopher Baines
Owner
unassigned
Submitted by
Morgan Smith
Severity
normal

Debbugs page

M
M
Morgan Smith wrote on 13 Mar 12:42 -0700
(address . guix-patches@gnu.org)(name . Morgan Smith)(address . Morgan.J.Smith@outlook.com)
CH3PR84MB342448E7EB63F85EA958565DC5D32@CH3PR84MB3424.NAMPRD84.PROD.OUTLOOK.COM
I was having some trouble running llama-cpp: it didn't have the ability to
download things, and the python scripts didn't seem to have their dependencies.
This no longer installs "convert_hf_to_gguf_update.py", but that didn't work
before this patch series anyway.

Morgan Smith (2):
gnu: Add python-gguf-llama-cpp.
gnu: llama-cpp: Update to 0.0.0-b4882.

gnu/local.mk | 1 -
gnu/packages/machine-learning.scm | 49 +++++++++++++------
.../patches/llama-cpp-vulkan-optional.patch | 38 --------------
3 files changed, 35 insertions(+), 53 deletions(-)
delete mode 100644 gnu/packages/patches/llama-cpp-vulkan-optional.patch

--
2.48.1
M
M
Morgan Smith wrote on 13 Mar 14:18 -0700
[PATCH 1/2] gnu: Add python-gguf-llama-cpp.
(address . 76999@debbugs.gnu.org)(name . Morgan Smith)(address . Morgan.J.Smith@outlook.com)
CH3PR84MB3424609A3E0E8CE70C8AF473C5D32@CH3PR84MB3424.NAMPRD84.PROD.OUTLOOK.COM
* gnu/packages/machine-learning.scm (python-gguf-llama-cpp): New variable.

Change-Id: I1c1b5f5956e3acb380b56816d180f53243b741fa
---
gnu/packages/machine-learning.scm | 15 +++++++++++++++
1 file changed, 15 insertions(+)

Toggle diff (28 lines)
diff --git a/gnu/packages/machine-learning.scm b/gnu/packages/machine-learning.scm
index 246b004156..ee5feb58fc 100644
--- a/gnu/packages/machine-learning.scm
+++ b/gnu/packages/machine-learning.scm
@@ -6490,6 +6490,21 @@ (define-public python-gguf
(description "A Python library for reading and writing GGUF & GGML format ML models.")
(license license:expat)))
+(define-public python-gguf-llama-cpp
+ (package/inherit python-gguf
+ (version "0.16.0")
+ (source (package-source llama-cpp))
+ (propagated-inputs (list python-numpy python-pyyaml python-sentencepiece
+ python-tqdm))
+ (native-inputs (list python-poetry-core))
+ (arguments
+ (substitute-keyword-arguments (package-arguments python-gguf)
+ ((#:phases phases #~%standard-phases)
+ #~(modify-phases #$phases
+ (add-after 'unpack 'chdir
+ (lambda _
+ (chdir "gguf-py")))))))))
+
(define-public python-gymnasium
(package
(name "python-gymnasium")
--
2.48.1
M
M
Morgan Smith wrote on 13 Mar 14:18 -0700
[PATCH 2/2] gnu: llama-cpp: Update to 0.0.0-b4882.
(address . 76999@debbugs.gnu.org)(name . Morgan Smith)(address . Morgan.J.Smith@outlook.com)
CH3PR84MB3424D9E7B6E013CFE20DBB63C5D32@CH3PR84MB3424.NAMPRD84.PROD.OUTLOOK.COM
* gnu/packages/machine-learning.scm (llama-cpp): Update to 0.0.0-b4882.
[inputs]: Add curl, glslang, and python-gguf-llama-cpp.
[native-inputs]: bash -> bash-minimal.
[source, homepage]: Update URL.
[python-scripts]: Check that we can run them.
[fix-tests]: Fix an additional test.
* gnu/packages/patches/llama-cpp-vulkan-optional.patch: Delete.
* gnu/local.mk: Unregister patch.

Change-Id: Ic297534cd142cb83e3964eae21b4eb807b74e9bc
---
gnu/local.mk | 1 -
gnu/packages/machine-learning.scm | 41 +++++++++++--------
.../patches/llama-cpp-vulkan-optional.patch | 38 -----------------
3 files changed, 25 insertions(+), 55 deletions(-)
delete mode 100644 gnu/packages/patches/llama-cpp-vulkan-optional.patch

Toggle diff (135 lines)
diff --git a/gnu/local.mk b/gnu/local.mk
index 5425095e1d..dcff631515 100644
--- a/gnu/local.mk
+++ b/gnu/local.mk
@@ -1841,7 +1841,6 @@ dist_patch_DATA = \
%D%/packages/patches/mcrypt-CVE-2012-4527.patch \
%D%/packages/patches/libmemcached-build-with-gcc7.patch \
%D%/packages/patches/libmhash-hmac-fix-uaf.patch \
- %D%/packages/patches/llama-cpp-vulkan-optional.patch \
%D%/packages/patches/llhttp-ponyfill-object-fromentries.patch \
%D%/packages/patches/lvm2-no-systemd.patch \
%D%/packages/patches/maturin-no-cross-compile.patch \
diff --git a/gnu/packages/machine-learning.scm b/gnu/packages/machine-learning.scm
index ee5feb58fc..b173f54fec 100644
--- a/gnu/packages/machine-learning.scm
+++ b/gnu/packages/machine-learning.scm
@@ -77,6 +77,7 @@ (define-module (gnu packages machine-learning)
#:use-module (gnu packages cmake)
#:use-module (gnu packages cpp)
#:use-module (gnu packages cran)
+ #:use-module (gnu packages curl)
#:use-module (gnu packages databases)
#:use-module (gnu packages dejagnu)
#:use-module (gnu packages documentation)
@@ -585,7 +586,7 @@ (define-public guile-aiscm-next
(deprecated-package "guile-aiscm-next" guile-aiscm))
(define-public llama-cpp
- (let ((tag "b4549"))
+ (let ((tag "b4882"))
(package
(name "llama-cpp")
(version (string-append "0.0.0-" tag))
@@ -593,19 +594,19 @@ (define-public llama-cpp
(origin
(method git-fetch)
(uri (git-reference
- (url "https://github.com/ggerganov/llama.cpp")
+ (url "https://github.com/ggml-org/llama.cpp")
(commit tag)))
(file-name (git-file-name name tag))
(sha256
- (base32 "1xf2579q0r8nv06kj8padi6w9cv30w58vdys65nq8yzm3dy452a1"))
- (patches
- (search-patches "llama-cpp-vulkan-optional.patch"))))
+ (base32 "1mhh4293lgvyvyq58hpphqk18n5g2zadafpdf9icf7xlj0cf7bqc"))))
(build-system cmake-build-system)
(arguments
(list
#:configure-flags
- #~(list "-DBUILD_SHARED_LIBS=ON"
+ #~(list #$(string-append "-DGGML_BUILD_NUMBER=" tag)
+ "-DBUILD_SHARED_LIBS=ON"
"-DGGML_VULKAN=ON"
+ "-DLLAMA_CURL=ON"
"-DGGML_BLAS=ON"
"-DGGML_BLAS_VENDOR=OpenBLAS"
(string-append "-DBLAS_INCLUDE_DIRS="
@@ -635,13 +636,16 @@ (define-public llama-cpp
(substitute* "ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp"
(("\"/bin/sh\"")
(string-append "\"" (search-input-file inputs "/bin/sh") "\"")))))
- (add-after 'unpack 'disable-unrunable-tests
+ (add-after 'unpack 'fix-tests
(lambda _
;; test-eval-callback downloads ML model from network, cannot
;; run in Guix build environment
(substitute* '("examples/eval-callback/CMakeLists.txt")
(("COMMAND llama-eval-callback")
- "COMMAND true llama-eval-callback"))))
+ "COMMAND true llama-eval-callback"))
+ ;; Help it find the test files it needs
+ (substitute* "tests/test-chat.cpp"
+ (("\"\\.\\./\"") "\"../source/\""))))
(add-before 'install 'install-python-scripts
(lambda _
(let ((bin (string-append #$output "/bin/")))
@@ -657,23 +661,28 @@ (define-public llama-cpp
(get-string-all input))))))
(chmod (string-append bin script) #o555)))
(mkdir-p bin)
- (make-script "convert_hf_to_gguf")
- (make-script "convert_llama_ggml_to_gguf")
- (make-script "convert_hf_to_gguf_update.py"))))
- (add-after 'install-python-scripts 'wrap-python-scripts
- (assoc-ref python:%standard-phases 'wrap))
+ (for-each
+ (lambda (file)
+ (make-script file)
+ ;; Run script as a sanity check
+ (invoke (string-append bin file) "-h"))
+ '(;; involves adding python-transformers package which looks involved.
+ ;; "convert_hf_to_gguf_update.py"
+ "convert_hf_to_gguf"
+ "convert_llama_ggml_to_gguf")))))
(add-after 'install 'remove-tests
(lambda* (#:key outputs #:allow-other-keys)
(for-each delete-file (find-files
(string-append (assoc-ref outputs "out")
"/bin")
"^test-")))))))
- (inputs (list python vulkan-headers vulkan-loader))
- (native-inputs (list pkg-config shaderc bash))
+ (inputs (list curl glslang python python-gguf-llama-cpp
+ vulkan-headers vulkan-loader))
+ (native-inputs (list pkg-config shaderc bash-minimal))
(propagated-inputs
(list python-numpy python-pytorch python-sentencepiece openblas))
(properties '((tunable? . #true))) ;use AVX512, FMA, etc. when available
- (home-page "https://github.com/ggerganov/llama.cpp")
+ (home-page "https://github.com/ggml-org/llama.cpp")
(synopsis "Port of Facebook's LLaMA model in C/C++")
(description "This package provides a port to Facebook's LLaMA collection
of foundation language models. It requires models parameters to be downloaded
diff --git a/gnu/packages/patches/llama-cpp-vulkan-optional.patch b/gnu/packages/patches/llama-cpp-vulkan-optional.patch
deleted file mode 100644
index 43a49b6a02..0000000000
--- a/gnu/packages/patches/llama-cpp-vulkan-optional.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-Author: Danny Milosavljevic <dannym@friendly-machines.com>
-Date: 2025-01-29
-License: Expat
-Subject: Make Vulkan optional
-
-See also: <https://github.com/ggerganov/llama.cpp/pull/11494>
-
-diff -ru orig/llama.cpp/ggml/include/ggml-vulkan.h llama.cpp/ggml/include/ggml-vulkan.h
---- orig/llama.cpp/ggml/include/ggml-vulkan.h 2025-01-29 10:24:10.894476682 +0100
-+++ llama.cpp/ggml/include/ggml-vulkan.h 2025-02-07 18:28:34.509509638 +0100
-@@ -10,8 +10,6 @@
- #define GGML_VK_NAME "Vulkan"
- #define GGML_VK_MAX_DEVICES 16
-
--GGML_BACKEND_API void ggml_vk_instance_init(void);
--
- // backend API
- GGML_BACKEND_API ggml_backend_t ggml_backend_vk_init(size_t dev_num);
-
-diff -ru orig/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp
---- orig/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp 2025-01-29 10:24:10.922476480 +0100
-+++ llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp 2025-01-29 22:33:19.955087552 +0100
-@@ -8174,8 +8174,13 @@
- /* .iface = */ ggml_backend_vk_reg_i,
- /* .context = */ nullptr,
- };
--
-- return &reg;
-+ try {
-+ ggml_vk_instance_init();
-+ return &reg;
-+ } catch (const vk::SystemError& e) {
-+ VK_LOG_DEBUG("ggml_vk_get_device_count() -> Error: System error: " << e.what());
-+ return nullptr;
-+ }
- }
-
- // Extension availability
--
2.48.1
M
M
Morgan Smith wrote 6 days ago
[PATCH v2 1/2] gnu: Add python-gguf-llama-cpp.
(address . 76999@debbugs.gnu.org)(name . Morgan Smith)(address . Morgan.J.Smith@outlook.com)
CH3PR84MB3424CA14474F88C0E8FB0F7BC5AD2@CH3PR84MB3424.NAMPRD84.PROD.OUTLOOK.COM
* gnu/packages/machine-learning.scm (python-gguf-llama-cpp): New variable.

Change-Id: I1c1b5f5956e3acb380b56816d180f53243b741fa
---
gnu/packages/machine-learning.scm | 15 +++++++++++++++
1 file changed, 15 insertions(+)

Toggle diff (30 lines)
diff --git a/gnu/packages/machine-learning.scm b/gnu/packages/machine-learning.scm
index 7fdf5f37ee..7cb807ae91 100644
--- a/gnu/packages/machine-learning.scm
+++ b/gnu/packages/machine-learning.scm
@@ -6544,6 +6544,21 @@ (define-public python-gguf
(description "A Python library for reading and writing GGUF & GGML format ML models.")
(license license:expat)))
+(define-public python-gguf-llama-cpp
+ (package/inherit python-gguf
+ (version "0.16.0")
+ (source (package-source llama-cpp))
+ (propagated-inputs (list python-numpy python-pyyaml python-sentencepiece
+ python-tqdm))
+ (native-inputs (list python-poetry-core))
+ (arguments
+ (substitute-keyword-arguments (package-arguments python-gguf)
+ ((#:phases phases #~%standard-phases)
+ #~(modify-phases #$phases
+ (add-after 'unpack 'chdir
+ (lambda _
+ (chdir "gguf-py")))))))))
+
(define-public python-gymnasium
(package
(name "python-gymnasium")

base-commit: e2c2f98edd5d64921678c2570439dedfe662b1f8
--
2.49.0
M
M
Morgan Smith wrote 6 days ago
[PATCH v2 2/2] gnu: llama-cpp: Update to 0.0.0-b5013.
(address . 76999@debbugs.gnu.org)(name . Morgan Smith)(address . Morgan.J.Smith@outlook.com)
CH3PR84MB3424D4B9B5AAFFE7894C0986C5AD2@CH3PR84MB3424.NAMPRD84.PROD.OUTLOOK.COM
* gnu/packages/machine-learning.scm (llama-cpp): Update to 0.0.0-b5013.
[inputs]: Add curl, glslang, and python-gguf-llama-cpp.
[native-inputs]: bash -> bash-minimal.
[source, homepage]: Update URL.
[python-scripts]: Rely on upstream to install them. Delete phase.
[fix-tests]: Fix an additional test.
* gnu/packages/patches/llama-cpp-vulkan-optional.patch: Delete.
* gnu/local.mk: Unregister patch.

Change-Id: Ic297534cd142cb83e3964eae21b4eb807b74e9bc
---
gnu/local.mk | 1 -
gnu/packages/machine-learning.scm | 47 +++++++------------
.../patches/llama-cpp-vulkan-optional.patch | 38 ---------------
3 files changed, 17 insertions(+), 69 deletions(-)
delete mode 100644 gnu/packages/patches/llama-cpp-vulkan-optional.patch

Toggle diff (135 lines)
diff --git a/gnu/local.mk b/gnu/local.mk
index f03fcb14fc..00b1a7a959 100644
--- a/gnu/local.mk
+++ b/gnu/local.mk
@@ -1845,7 +1845,6 @@ dist_patch_DATA = \
%D%/packages/patches/libmhash-hmac-fix-uaf.patch \
%D%/packages/patches/libmodbus-disable-networking-test.patch \
%D%/packages/patches/lib-tl-for-telegram-memcpy.patch \
- %D%/packages/patches/llama-cpp-vulkan-optional.patch \
%D%/packages/patches/llhttp-ponyfill-object-fromentries.patch \
%D%/packages/patches/lvm2-no-systemd.patch \
%D%/packages/patches/maturin-no-cross-compile.patch \
diff --git a/gnu/packages/machine-learning.scm b/gnu/packages/machine-learning.scm
index 7cb807ae91..84be26cf35 100644
--- a/gnu/packages/machine-learning.scm
+++ b/gnu/packages/machine-learning.scm
@@ -78,6 +78,7 @@ (define-module (gnu packages machine-learning)
#:use-module (gnu packages cmake)
#:use-module (gnu packages cpp)
#:use-module (gnu packages cran)
+ #:use-module (gnu packages curl)
#:use-module (gnu packages databases)
#:use-module (gnu packages dejagnu)
#:use-module (gnu packages documentation)
@@ -634,7 +635,7 @@ (define-public guile-aiscm-next
(deprecated-package "guile-aiscm-next" guile-aiscm))
(define-public llama-cpp
- (let ((tag "b4549"))
+ (let ((tag "b5013"))
(package
(name "llama-cpp")
(version (string-append "0.0.0-" tag))
@@ -642,19 +643,19 @@ (define-public llama-cpp
(origin
(method git-fetch)
(uri (git-reference
- (url "https://github.com/ggerganov/llama.cpp")
+ (url "https://github.com/ggml-org/llama.cpp")
(commit tag)))
(file-name (git-file-name name tag))
(sha256
- (base32 "1xf2579q0r8nv06kj8padi6w9cv30w58vdys65nq8yzm3dy452a1"))
- (patches
- (search-patches "llama-cpp-vulkan-optional.patch"))))
+ (base32 "0s73dz871x53dr366lkzq19f677bwgma2ri8m5vhbfa9p8yp4p3r"))))
(build-system cmake-build-system)
(arguments
(list
#:configure-flags
- #~(list "-DBUILD_SHARED_LIBS=ON"
+ #~(list #$(string-append "-DGGML_BUILD_NUMBER=" tag)
+ "-DBUILD_SHARED_LIBS=ON"
"-DGGML_VULKAN=ON"
+ "-DLLAMA_CURL=ON"
"-DGGML_BLAS=ON"
"-DGGML_BLAS_VENDOR=OpenBLAS"
(string-append "-DBLAS_INCLUDE_DIRS="
@@ -684,32 +685,17 @@ (define-public llama-cpp
(substitute* "ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp"
(("\"/bin/sh\"")
(string-append "\"" (search-input-file inputs "/bin/sh") "\"")))))
- (add-after 'unpack 'disable-unrunable-tests
+ (add-after 'unpack 'fix-tests
(lambda _
;; test-eval-callback downloads ML model from network, cannot
;; run in Guix build environment
(substitute* '("examples/eval-callback/CMakeLists.txt")
(("COMMAND llama-eval-callback")
- "COMMAND true llama-eval-callback"))))
- (add-before 'install 'install-python-scripts
- (lambda _
- (let ((bin (string-append #$output "/bin/")))
- (define (make-script script)
- (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
- (call-with-input-file
- (string-append "../source/" script suffix)
- (lambda (input)
- (call-with-output-file (string-append bin script)
- (lambda (output)
- (format output "#!~a/bin/python3\n~a"
- #$(this-package-input "python")
- (get-string-all input))))))
- (chmod (string-append bin script) #o555)))
- (mkdir-p bin)
- (make-script "convert_hf_to_gguf")
- (make-script "convert_llama_ggml_to_gguf")
- (make-script "convert_hf_to_gguf_update.py"))))
- (add-after 'install-python-scripts 'wrap-python-scripts
+ "COMMAND true llama-eval-callback"))
+ ;; Help it find the test files it needs
+ (substitute* "tests/test-chat.cpp"
+ (("\"\\.\\./\"") "\"../source/\""))))
+ (add-after 'install 'wrap-python-scripts
(assoc-ref python:%standard-phases 'wrap))
(add-after 'install 'remove-tests
(lambda* (#:key outputs #:allow-other-keys)
@@ -717,12 +703,13 @@ (define-public llama-cpp
(string-append (assoc-ref outputs "out")
"/bin")
"^test-")))))))
- (inputs (list python vulkan-headers vulkan-loader))
- (native-inputs (list pkg-config shaderc bash))
+ (inputs (list curl glslang python python-gguf-llama-cpp
+ vulkan-headers vulkan-loader))
+ (native-inputs (list pkg-config shaderc bash-minimal))
(propagated-inputs
(list python-numpy python-pytorch python-sentencepiece openblas))
(properties '((tunable? . #true))) ;use AVX512, FMA, etc. when available
- (home-page "https://github.com/ggerganov/llama.cpp")
+ (home-page "https://github.com/ggml-org/llama.cpp")
(synopsis "Port of Facebook's LLaMA model in C/C++")
(description "This package provides a port to Facebook's LLaMA collection
of foundation language models. It requires models parameters to be downloaded
diff --git a/gnu/packages/patches/llama-cpp-vulkan-optional.patch b/gnu/packages/patches/llama-cpp-vulkan-optional.patch
deleted file mode 100644
index 43a49b6a02..0000000000
--- a/gnu/packages/patches/llama-cpp-vulkan-optional.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-Author: Danny Milosavljevic <dannym@friendly-machines.com>
-Date: 2025-01-29
-License: Expat
-Subject: Make Vulkan optional
-
-See also: <https://github.com/ggerganov/llama.cpp/pull/11494>
-
-diff -ru orig/llama.cpp/ggml/include/ggml-vulkan.h llama.cpp/ggml/include/ggml-vulkan.h
---- orig/llama.cpp/ggml/include/ggml-vulkan.h 2025-01-29 10:24:10.894476682 +0100
-+++ llama.cpp/ggml/include/ggml-vulkan.h 2025-02-07 18:28:34.509509638 +0100
-@@ -10,8 +10,6 @@
- #define GGML_VK_NAME "Vulkan"
- #define GGML_VK_MAX_DEVICES 16
-
--GGML_BACKEND_API void ggml_vk_instance_init(void);
--
- // backend API
- GGML_BACKEND_API ggml_backend_t ggml_backend_vk_init(size_t dev_num);
-
-diff -ru orig/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp
---- orig/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp 2025-01-29 10:24:10.922476480 +0100
-+++ llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp 2025-01-29 22:33:19.955087552 +0100
-@@ -8174,8 +8174,13 @@
- /* .iface = */ ggml_backend_vk_reg_i,
- /* .context = */ nullptr,
- };
--
-- return &reg;
-+ try {
-+ ggml_vk_instance_init();
-+ return &reg;
-+ } catch (const vk::SystemError& e) {
-+ VK_LOG_DEBUG("ggml_vk_get_device_count() -> Error: System error: " << e.what());
-+ return nullptr;
-+ }
- }
-
- // Extension availability
--
2.49.0
C
C
Christopher Baines wrote 5 hours ago
Re: [bug#76999] [PATCH 1/2] gnu: Add python-gguf-llama-cpp.
(name . Morgan Smith)(address . Morgan.J.Smith@outlook.com)(address . 76999@debbugs.gnu.org)
87y0wdw9kh.fsf@cbaines.net
Morgan Smith <Morgan.J.Smith@outlook.com> writes:

Toggle quote (34 lines)
> * gnu/packages/machine-learning.scm (python-gguf-llama-cpp): New variable.
>
> Change-Id: I1c1b5f5956e3acb380b56816d180f53243b741fa
> ---
> gnu/packages/machine-learning.scm | 15 +++++++++++++++
> 1 file changed, 15 insertions(+)
>
> diff --git a/gnu/packages/machine-learning.scm b/gnu/packages/machine-learning.scm
> index 246b004156..ee5feb58fc 100644
> --- a/gnu/packages/machine-learning.scm
> +++ b/gnu/packages/machine-learning.scm
> @@ -6490,6 +6490,21 @@ (define-public python-gguf
> (description "A Python library for reading and writing GGUF & GGML format ML models.")
> (license license:expat)))
>
> +(define-public python-gguf-llama-cpp
> + (package/inherit python-gguf
> + (version "0.16.0")
> + (source (package-source llama-cpp))
> + (propagated-inputs (list python-numpy python-pyyaml python-sentencepiece
> + python-tqdm))
> + (native-inputs (list python-poetry-core))
> + (arguments
> + (substitute-keyword-arguments (package-arguments python-gguf)
> + ((#:phases phases #~%standard-phases)
> + #~(modify-phases #$phases
> + (add-after 'unpack 'chdir
> + (lambda _
> + (chdir "gguf-py")))))))))
> +
> (define-public python-gymnasium
> (package
> (name "python-gymnasium")

Can python-gguf be updated rather than adding this package?
-----BEGIN PGP SIGNATURE-----

iQKlBAEBCgCPFiEEPonu50WOcg2XVOCyXiijOwuE9XcFAmfyqI5fFIAAAAAALgAo
aXNzdWVyLWZwckBub3RhdGlvbnMub3BlbnBncC5maWZ0aGhvcnNlbWFuLm5ldDNF
ODlFRUU3NDU4RTcyMEQ5NzU0RTBCMjVFMjhBMzNCMEI4NEY1NzcRHG1haWxAY2Jh
aW5lcy5uZXQACgkQXiijOwuE9Xe2LQ/+Mb1LAKLiFGqUyj+4vG9WPxr0O8nKK4I2
2MipgfPBQWI+U5jSXydFYC2VgAmGHQroQRzKEG/K20tvfMvp+Rvi4w9LVHSQdItb
rpGTzeBA7jBNotI5LZOOE21bQp9L5YH9+MEkhuoyPIibgA45PzcX3STtX586YUQ2
kF+aC8PwGj7MiDocyKNZj34KkaMY0HlQsDihcvRSnfDDJUneKBbnQI6X47SuNe7V
KLiHiNz7TzOVxMNroRdauYfVEyZRXH93e/4osP+A9Iz8ezjE/8qzsoCAM32AguY8
fgPC/kWsFBKUm+4UcYKL2M8HTj2b/pQLD9NzUoroYsf1rTctu+6kGGFIFy7jlmnL
khI6BKUF2JOlUvZVDLHy5sMANMZXx2ZswB9rN0gk5bRCSQtCsDW+5vbNJH7ay8Bk
QiQYv0oxKxxWYhHMqVdlp/O3FzljQu3LE+HXT6TXRN13x5EFDOd8umhV8kH5PSfF
KKu/GUHVdrN8vnf1sFJYmf8qysiDoIOZgJarHZ+hqxY07/+aN2vzLwA2oiHo3X/I
KOt6OJrd2Eh2ildmnbbt1t7bzhvK0od8TJeipKKg05ffRios+SzfNsteHY8wxc80
798GazTYlvvoNPmXocQfGB9iHCt19Ew4UxnTO+TesDuBp3cdcLiZvYFRmHyeEIaZ
CsKV0fHaja0=
=V97f
-----END PGP SIGNATURE-----

?
Your comment

Commenting via the web interface is currently disabled.

To comment on this conversation send an email to 76999@patchwise.org

To respond to this issue using the mumi CLI, first switch to it
mumi current 76999
Then, you may apply the latest patchset in this issue (with sign off)
mumi am -- -s
Or, compose a reply to this issue
mumi compose
Or, send patches to this issue
mumi send-email *.patch