git: 0b2c10a5c55d - main - misc/llama-cpp: update 3510 → 3538
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Wed, 07 Aug 2024 09:09:57 UTC
The branch main has been updated by yuri: URL: https://cgit.FreeBSD.org/ports/commit/?id=0b2c10a5c55d4638553c86281632495b962e48de commit 0b2c10a5c55d4638553c86281632495b962e48de Author: Yuri Victorovich <yuri@FreeBSD.org> AuthorDate: 2024-08-07 07:46:42 +0000 Commit: Yuri Victorovich <yuri@FreeBSD.org> CommitDate: 2024-08-07 09:09:44 +0000 misc/llama-cpp: update 3510 → 3538 Reported by: portscout --- misc/llama-cpp/Makefile | 15 ++++++++++++--- misc/llama-cpp/distinfo | 6 +++--- misc/llama-cpp/pkg-plist | 1 + 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile index a1f36f59f1bb..ada8d3868dec 100644 --- a/misc/llama-cpp/Makefile +++ b/misc/llama-cpp/Makefile @@ -1,6 +1,6 @@ PORTNAME= llama-cpp DISTVERSIONPREFIX= b -DISTVERSION= 3510 +DISTVERSION= 3538 CATEGORIES= misc # machine-learning MAINTAINER= yuri@FreeBSD.org @@ -12,6 +12,8 @@ LICENSE_FILE= ${WRKSRC}/LICENSE BROKEN_armv7= clang crashes, see https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=278810 +LIB_DEPENDS= libvulkan.so:graphics/vulkan-loader + USES= cmake:testing compiler:c++11-lang python:run shebangfix USE_LDCONFIG= yes @@ -28,13 +30,20 @@ CMAKE_TESTING_ON= LLAMA_BUILD_TESTS LDFLAGS+= -pthread -OPTIONS_DEFINE= EXAMPLES +OPTIONS_DEFINE= EXAMPLES VULKAN +OPTIONS_DEFAULT= VULKAN OPTIONS_SUB= yes EXAMPLES_CMAKE_BOOL= LLAMA_BUILD_EXAMPLES +VULKAN_DESC= Vulkan GPU offload support +VULKAN_CMAKE_BOOL= GGML_VULKAN +VULKAN_BUILD_DEPENDS= glslc:graphics/shaderc \ vulkan-headers>0:graphics/vulkan-headers +VULKAN_LIB_DEPENDS= libvulkan.so:graphics/vulkan-loader + BINARY_ALIAS= git=false -# 1 test fails due to a missing model file (stories260K.gguf) +# 2 tests fail: https://github.com/ggerganov/llama.cpp/issues/8906 .include <bsd.port.mk> diff --git a/misc/llama-cpp/distinfo b/misc/llama-cpp/distinfo index 1a81c2d4ca35..8abf678dcda5 100644 --- a/misc/llama-cpp/distinfo +++ b/misc/llama-cpp/distinfo @@ -1,5 +1,5 @@ -TIMESTAMP = 1722831985
-SHA256 (ggerganov-llama.cpp-b3510_GH0.tar.gz) = 3369228d3209f5274ca5d650cdda6caaa7e08d3dbb356205c35fd22f4f0e5184 -SIZE (ggerganov-llama.cpp-b3510_GH0.tar.gz) = 19010230 +TIMESTAMP = 1722999344 +SHA256 (ggerganov-llama.cpp-b3538_GH0.tar.gz) = d5260bd41a80a7a1df5c908deae3486604b4f45ccf7bc0fbceca13fdc32e31d9 +SIZE (ggerganov-llama.cpp-b3538_GH0.tar.gz) = 19014576 SHA256 (nomic-ai-kompute-4565194_GH0.tar.gz) = 95b52d2f0514c5201c7838348a9c3c9e60902ea3c6c9aa862193a212150b2bfc SIZE (nomic-ai-kompute-4565194_GH0.tar.gz) = 13540496 diff --git a/misc/llama-cpp/pkg-plist b/misc/llama-cpp/pkg-plist index a6f7ee8ad18b..7d7cc039fb51 100644 @@ -34,6 +34,7 @@ bin/llama-embedding %%EXAMPLES%%bin/llama-simple %%EXAMPLES%%bin/llama-speculative %%EXAMPLES%%bin/llama-tokenize +%%VULKAN%%bin/vulkan-shaders-gen include/ggml-alloc.h include/ggml-backend.h include/ggml-blas.h