git: e76312c8a2bd - main - misc/llama-cpp: update 3291 → 3306

From: Yuri Victorovich <yuri@FreeBSD.org>
Date: Fri, 05 Jul 2024 23:56:10 UTC
The branch main has been updated by yuri:

URL: https://cgit.FreeBSD.org/ports/commit/?id=e76312c8a2bd5669aafc4951ff36e018dc755ec9

commit e76312c8a2bd5669aafc4951ff36e018dc755ec9
Author:     Yuri Victorovich <yuri@FreeBSD.org>
AuthorDate: 2024-07-05 23:55:53 +0000
Commit:     Yuri Victorovich <yuri@FreeBSD.org>
CommitDate: 2024-07-05 23:56:03 +0000

    misc/llama-cpp: update 3291 → 3306
    
    Reported by:    portscout
---
 misc/llama-cpp/Makefile  | 2 +-
 misc/llama-cpp/distinfo  | 6 +++---
 misc/llama-cpp/pkg-plist | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile
index 9e174aa09a63..49a326642a5b 100644
--- a/misc/llama-cpp/Makefile
+++ b/misc/llama-cpp/Makefile
@@ -1,6 +1,6 @@
 PORTNAME=	llama-cpp
 DISTVERSIONPREFIX=	b
-DISTVERSION=	3291
+DISTVERSION=	3306
 CATEGORIES=	misc # machine-learning
 
 MAINTAINER=	yuri@FreeBSD.org
diff --git a/misc/llama-cpp/distinfo b/misc/llama-cpp/distinfo
index 679b87a30184..dd9c1448af49 100644
--- a/misc/llama-cpp/distinfo
+++ b/misc/llama-cpp/distinfo
@@ -1,5 +1,5 @@
-TIMESTAMP = 1720110181
-SHA256 (ggerganov-llama.cpp-b3291_GH0.tar.gz) = d089992ab617060f89cf82457050cfe445a416b19811fec89374266f51b9846e
-SIZE (ggerganov-llama.cpp-b3291_GH0.tar.gz) = 20666117
+TIMESTAMP = 1720221863
+SHA256 (ggerganov-llama.cpp-b3306_GH0.tar.gz) = 662004b09c778fad10326ab201470782a09a9b58a0eabfcf0920dff6ffe3edf5
+SIZE (ggerganov-llama.cpp-b3306_GH0.tar.gz) = 20716544
 SHA256 (nomic-ai-kompute-4565194_GH0.tar.gz) = 95b52d2f0514c5201c7838348a9c3c9e60902ea3c6c9aa862193a212150b2bfc
 SIZE (nomic-ai-kompute-4565194_GH0.tar.gz) = 13540496
diff --git a/misc/llama-cpp/pkg-plist b/misc/llama-cpp/pkg-plist
index e155fcf077a4..4e0a968bd64c 100644
--- a/misc/llama-cpp/pkg-plist
+++ b/misc/llama-cpp/pkg-plist
@@ -1,4 +1,4 @@
-%%EXAMPLES%%bin/convert-hf-to-gguf.py
+%%EXAMPLES%%bin/convert_hf_to_gguf.py
 %%EXAMPLES%%bin/llama-baby-llama
 %%EXAMPLES%%bin/llama-batched
 %%EXAMPLES%%bin/llama-batched-bench
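
For context, a version bump like this one typically follows the standard ports update workflow: bump DISTVERSION in the Makefile, regenerate distinfo, and adjust pkg-plist for any files renamed upstream. The commit itself does not state how it was produced, so the exact commands below are an assumption; they are a minimal sketch using stock ports-framework targets:

    # in a checked-out ports tree
    cd /usr/ports/misc/llama-cpp
    # edit DISTVERSION in the Makefile (3291 -> 3306), then:
    make makesum        # refetch the distfile and regenerate distinfo (TIMESTAMP, SHA256, SIZE)
    make stage          # build and stage the port to verify it still packages
    make check-plist    # report pkg-plist entries that no longer match the staged files

In this update, check-plist (or a manual comparison of the staging area) would show the conversion script renamed from convert-hf-to-gguf.py to convert_hf_to_gguf.py, an upstream llama.cpp change between b3291 and b3306 that the pkg-plist hunk above reflects.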