git: 10580f9c51f6 - main - misc/llama-cpp: update 3135 → 3145

From: Yuri Victorovich <yuri@FreeBSD.org>
Date: Fri, 14 Jun 2024 09:11:59 UTC
The branch main has been updated by yuri:

URL: https://cgit.FreeBSD.org/ports/commit/?id=10580f9c51f6d16af473cffc0ed622effc046e86

commit 10580f9c51f6d16af473cffc0ed622effc046e86
Author:     Yuri Victorovich <yuri@FreeBSD.org>
AuthorDate: 2024-06-14 08:55:03 +0000
Commit:     Yuri Victorovich <yuri@FreeBSD.org>
CommitDate: 2024-06-14 09:11:39 +0000

    misc/llama-cpp: update 3135 → 3145
    
    Reported by:    portscout
---
 misc/llama-cpp/Makefile  |  2 +-
 misc/llama-cpp/distinfo  |  6 ++---
 misc/llama-cpp/pkg-plist | 67 ++++++++++++++++++++++++------------------------
 3 files changed, 38 insertions(+), 37 deletions(-)
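The three files in the diffstat are the usual ones touched by a routine version bump. As a hedged illustration only (the commit records the resulting diff, not the steps used to produce it), a bump like this is typically done with the standard ports-framework targets; ${EDITOR} is a placeholder and the comments describe what each target does:

    cd /usr/ports/misc/llama-cpp
    ${EDITOR} Makefile      # bump DISTVERSION from 3135 to 3145
    make makesum            # refetch the distfile, regenerate distinfo checksums
    make stage              # build and install into the stage directory
    make makeplist          # emit a candidate plist to reconcile with pkg-plist by hand
    make check-plist        # verify the staged files match the committed pkg-plist
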

diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile
index 5700d57653b5..ab01926ca833 100644
--- a/misc/llama-cpp/Makefile
+++ b/misc/llama-cpp/Makefile
@@ -1,6 +1,6 @@
 PORTNAME=	llama-cpp
 DISTVERSIONPREFIX=	b
-DISTVERSION=	3135
+DISTVERSION=	3145
 CATEGORIES=	misc # machine-learning
 
 MAINTAINER=	yuri@FreeBSD.org
diff --git a/misc/llama-cpp/distinfo b/misc/llama-cpp/distinfo
index bc8ebdb50bbf..8121e0b72d7e 100644
--- a/misc/llama-cpp/distinfo
+++ b/misc/llama-cpp/distinfo
@@ -1,5 +1,5 @@
-TIMESTAMP = 1718166366
-SHA256 (ggerganov-llama.cpp-b3135_GH0.tar.gz) = 4e5e5284177bf261545899c986df2f195e7703ee1647589a26ccf2e1389b6218
-SIZE (ggerganov-llama.cpp-b3135_GH0.tar.gz) = 20552678
+TIMESTAMP = 1718344488
+SHA256 (ggerganov-llama.cpp-b3145_GH0.tar.gz) = 8fbd54ce330227b8d6fe157e114dfa42d4b11da9d70924840365bce91fe88670
+SIZE (ggerganov-llama.cpp-b3145_GH0.tar.gz) = 20555853
 SHA256 (nomic-ai-kompute-4565194_GH0.tar.gz) = 95b52d2f0514c5201c7838348a9c3c9e60902ea3c6c9aa862193a212150b2bfc
 SIZE (nomic-ai-kompute-4565194_GH0.tar.gz) = 13540496
diff --git a/misc/llama-cpp/pkg-plist b/misc/llama-cpp/pkg-plist
index 0b5bc0e84cf6..1851cb1e6ee2 100644
--- a/misc/llama-cpp/pkg-plist
+++ b/misc/llama-cpp/pkg-plist
@@ -1,38 +1,39 @@
-%%EXAMPLES%%bin/baby-llama
-%%EXAMPLES%%bin/batched
-%%EXAMPLES%%bin/batched-bench
-%%EXAMPLES%%bin/benchmark
 %%EXAMPLES%%bin/convert-hf-to-gguf.py
-%%EXAMPLES%%bin/convert-llama2c-to-ggml
-%%EXAMPLES%%bin/embedding
-%%EXAMPLES%%bin/eval-callback
-%%EXAMPLES%%bin/export-lora
-%%EXAMPLES%%bin/finetune
-%%EXAMPLES%%bin/gguf
-%%EXAMPLES%%bin/gguf-split
-%%EXAMPLES%%bin/gritlm
-%%EXAMPLES%%bin/imatrix
-%%EXAMPLES%%bin/infill
+%%EXAMPLES%%bin/llama-baby-llama
+%%EXAMPLES%%bin/llama-batched
+%%EXAMPLES%%bin/llama-batched-bench
 %%EXAMPLES%%bin/llama-bench
-%%EXAMPLES%%bin/llava-cli
-%%EXAMPLES%%bin/lookahead
-%%EXAMPLES%%bin/lookup
-%%EXAMPLES%%bin/lookup-create
-%%EXAMPLES%%bin/lookup-merge
-%%EXAMPLES%%bin/lookup-stats
-%%EXAMPLES%%bin/main
-%%EXAMPLES%%bin/parallel
-%%EXAMPLES%%bin/passkey
-%%EXAMPLES%%bin/perplexity
-%%EXAMPLES%%bin/quantize
-%%EXAMPLES%%bin/quantize-stats
-%%EXAMPLES%%bin/retrieval
-%%EXAMPLES%%bin/save-load-state
-%%EXAMPLES%%bin/server
-%%EXAMPLES%%bin/simple
-%%EXAMPLES%%bin/speculative
-%%EXAMPLES%%bin/tokenize
-%%EXAMPLES%%bin/train-text-from-scratch
+%%EXAMPLES%%bin/llama-bench-matmult
+%%EXAMPLES%%bin/llama-cli
+%%EXAMPLES%%bin/llama-convert-llama2c-to-ggml
+%%EXAMPLES%%bin/llama-embedding
+%%EXAMPLES%%bin/llama-eval-callback
+%%EXAMPLES%%bin/llama-export-lora
+%%EXAMPLES%%bin/llama-finetune
+%%EXAMPLES%%bin/llama-gbnf-validator
+%%EXAMPLES%%bin/llama-gguf
+%%EXAMPLES%%bin/llama-gguf-split
+%%EXAMPLES%%bin/llama-gritlm
+%%EXAMPLES%%bin/llama-imatrix
+%%EXAMPLES%%bin/llama-infill
+%%EXAMPLES%%bin/llama-llava-cli
+%%EXAMPLES%%bin/llama-lookahead
+%%EXAMPLES%%bin/llama-lookup
+%%EXAMPLES%%bin/llama-lookup-create
+%%EXAMPLES%%bin/llama-lookup-merge
+%%EXAMPLES%%bin/llama-lookup-stats
+%%EXAMPLES%%bin/llama-parallel
+%%EXAMPLES%%bin/llama-passkey
+%%EXAMPLES%%bin/llama-perplexity
+%%EXAMPLES%%bin/llama-quantize
+%%EXAMPLES%%bin/llama-quantize-stats
+%%EXAMPLES%%bin/llama-retrieval
+%%EXAMPLES%%bin/llama-save-load-state
+%%EXAMPLES%%bin/llama-server
+%%EXAMPLES%%bin/llama-simple
+%%EXAMPLES%%bin/llama-speculative
+%%EXAMPLES%%bin/llama-tokenize
+%%EXAMPLES%%bin/llama-train-text-from-scratch
 include/ggml-alloc.h
 include/ggml-backend.h
 include/ggml.h
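
The large pkg-plist churn tracks upstream llama.cpp, which renamed its example binaries to a uniform llama- prefix in this release range, as the removed/added lines above show (e.g. main became llama-cli, server became llama-server). As a hedged illustration of the renamed commands (the model path is a placeholder; -m, -p and --port are standard llama.cpp options, not taken from this commit):

    # old:  main   -m model.gguf -p "Hello"
    llama-cli    -m /path/to/model.gguf -p "Hello"
    # old:  server -m model.gguf --port 8080
    llama-server -m /path/to/model.gguf --port 8080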