diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile index 12023ccb8981..c2612c1e54b8 100644 --- a/misc/llama-cpp/Makefile +++ b/misc/llama-cpp/Makefile @@ -1,81 +1,81 @@ PORTNAME= llama-cpp DISTVERSIONPREFIX= b -DISTVERSION= 7709 +DISTVERSION= 8132 CATEGORIES= misc # machine-learning MAINTAINER= yuri@FreeBSD.org COMMENT= Facebook's LLaMA model in C/C++ # ' WWW= https://github.com/ggerganov/llama.cpp LICENSE= MIT LICENSE_FILE= ${WRKSRC}/LICENSE BROKEN_armv7= clang crashes, see https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=278810 BROKEN_i386= compilation fails, see https://github.com/ggerganov/llama.cpp/issues/9545 USES= cmake:testing compiler:c++11-lang python:run shebangfix USE_LDCONFIG= yes USE_GITHUB= yes GH_ACCOUNT= ggerganov GH_PROJECT= llama.cpp GH_TUPLE= nomic-ai:kompute:4565194:kompute/kompute SHEBANG_GLOB= *.py CMAKE_ON= BUILD_SHARED_LIBS #LLAMA_USE_SYSTEM_GGML CMAKE_OFF= GGML_NATIVE \ FREEBSD_ALLOW_ADVANCED_CPU_FEATURES \ LLAMA_BUILD_TESTS CMAKE_TESTING_ON= LLAMA_BUILD_TESTS # user for llama-server, only used when EXAMPLES=ON USER= nobody SUB_LIST= USER=${USER} OPTIONS_DEFINE= CURL EXAMPLES VULKAN OPTIONS_DEFAULT= CURL VULKAN OPTIONS_SUB= yes CURL_DESCR= Use libcurl to download model from an URL CURL_CMAKE_BOOL= LLAMA_CURL CURL_USES= localbase CURL_LIB_DEPENDS= libcurl.so:ftp/curl EXAMPLES_CMAKE_BOOL= LLAMA_BUILD_EXAMPLES VULKAN_CMAKE_BOOL= GGML_VULKAN VULKAN_BUILD_DEPENDS= glslc:graphics/shaderc \ vulkan-headers>0:graphics/vulkan-headers VULKAN_LIB_DEPENDS= libvulkan.so:graphics/vulkan-loader BINARY_ALIAS= git=false \ python=${PYTHON_CMD} # for tests post-patch: # set version in the code @${REINPLACE_CMD} \ -e "s|set(BUILD_NUMBER 0)|set(BUILD_NUMBER ${DISTVERSION})|" \ ${WRKSRC}/cmake/build-info.cmake do-test-ci: # build of tests fails, see https://github.com/ggerganov/llama.cpp/issues/10955 @cd ${WRKSRC} && \ ${SETENV} ${MAKE_ENV} bash ci/run.sh ./tmp/results ./tmp/mnt .include <bsd.port.options.mk> .if ${PORT_OPTIONS:MEXAMPLES} USE_RC_SUBR= llama-server .endif 
# tests as of 4458: 97% tests passed, 1 tests failed out of 31, see https://github.com/ggerganov/llama.cpp/issues/11036 # tests as of 4649: # 88% tests passed, 4 tests failed out of 32 # The following tests FAILED: # 18 - test-chat (Subprocess aborted) main # see https://github.com/ggerganov/llama.cpp/issues/11705 # 24 - test-gguf (SEGFAULT) main # 25 - test-backend-ops (SEGFAULT) main # 32 - test-eval-callback (SEGFAULT) curl eval-callback .include <bsd.port.mk> diff --git a/misc/llama-cpp/distinfo index 13090992cbe0..d75ba99c1802 100644 --- a/misc/llama-cpp/distinfo +++ b/misc/llama-cpp/distinfo @@ -1,5 +1,5 @@ -TIMESTAMP = 1768206387 -SHA256 (ggerganov-llama.cpp-b7709_GH0.tar.gz) = 8aa5d02ec90c70fa496cc878ef3962733e74184a8e43f191db5471288f5cf911 -SIZE (ggerganov-llama.cpp-b7709_GH0.tar.gz) = 28712220 +TIMESTAMP = 1771831071 +SHA256 (ggerganov-llama.cpp-b8132_GH0.tar.gz) = 346e3ec5c8146947d4cdda6bf340384006c590b0c4f8a3c89f265dd076351f57 +SIZE (ggerganov-llama.cpp-b8132_GH0.tar.gz) = 29059325 SHA256 (nomic-ai-kompute-4565194_GH0.tar.gz) = 95b52d2f0514c5201c7838348a9c3c9e60902ea3c6c9aa862193a212150b2bfc SIZE (nomic-ai-kompute-4565194_GH0.tar.gz) = 13540496 diff --git a/misc/llama-cpp/pkg-plist b/misc/llama-cpp/pkg-plist index 84f8c70f2bbd..4a4098d35727 100644 --- a/misc/llama-cpp/pkg-plist +++ b/misc/llama-cpp/pkg-plist @@ -1,83 +1,84 @@ bin/convert_hf_to_gguf.py %%EXAMPLES%%bin/llama-batched %%EXAMPLES%%bin/llama-batched-bench %%EXAMPLES%%bin/llama-bench %%EXAMPLES%%bin/llama-cli %%EXAMPLES%%bin/llama-completion %%EXAMPLES%%bin/llama-convert-llama2c-to-ggml %%EXAMPLES%%bin/llama-cvector-generator %%EXAMPLES%%bin/llama-debug %%EXAMPLES%%bin/llama-diffusion-cli %%EXAMPLES%%bin/llama-embedding %%EXAMPLES%%bin/llama-eval-callback %%EXAMPLES%%bin/llama-export-lora %%EXAMPLES%%bin/llama-finetune %%EXAMPLES%%bin/llama-fit-params %%EXAMPLES%%bin/llama-gen-docs %%EXAMPLES%%bin/llama-gguf %%EXAMPLES%%bin/llama-gguf-hash %%EXAMPLES%%bin/llama-gguf-split 
%%EXAMPLES%%bin/llama-idle %%EXAMPLES%%bin/llama-imatrix %%EXAMPLES%%bin/llama-lookahead %%EXAMPLES%%bin/llama-lookup %%EXAMPLES%%bin/llama-lookup-create %%EXAMPLES%%bin/llama-lookup-merge %%EXAMPLES%%bin/llama-lookup-stats %%EXAMPLES%%bin/llama-mtmd-cli %%EXAMPLES%%bin/llama-parallel %%EXAMPLES%%bin/llama-passkey %%EXAMPLES%%bin/llama-perplexity %%EXAMPLES%%bin/llama-quantize %%EXAMPLES%%bin/llama-retrieval %%EXAMPLES%%bin/llama-save-load-state %%EXAMPLES%%bin/llama-server %%EXAMPLES%%bin/llama-simple %%EXAMPLES%%bin/llama-simple-chat %%EXAMPLES%%bin/llama-speculative %%EXAMPLES%%bin/llama-speculative-simple %%EXAMPLES%%bin/llama-tokenize %%EXAMPLES%%bin/llama-tts include/ggml-alloc.h include/ggml-backend.h include/ggml-blas.h include/ggml-cann.h include/ggml-cpp.h include/ggml-cpu.h include/ggml-cuda.h include/ggml-metal.h include/ggml-opt.h include/ggml-rpc.h include/ggml-sycl.h +include/ggml-virtgpu.h include/ggml-vulkan.h include/ggml-webgpu.h include/ggml-zendnn.h include/ggml.h include/gguf.h include/llama-cpp.h include/llama.h include/mtmd-helper.h include/mtmd.h lib/cmake/ggml/ggml-config.cmake lib/cmake/ggml/ggml-version.cmake lib/cmake/llama/llama-config.cmake lib/cmake/llama/llama-version.cmake lib/libggml-base.so lib/libggml-base.so.0 -lib/libggml-base.so.0.9.5 +lib/libggml-base.so.0.9.7 lib/libggml-cpu.so lib/libggml-cpu.so.0 -lib/libggml-cpu.so.0.9.5 -%%VULKAN%%lib/libggml-vulkan.so -%%VULKAN%%lib/libggml-vulkan.so.0 -%%VULKAN%%lib/libggml-vulkan.so.0.9.5 +lib/libggml-cpu.so.0.9.7 +%%VULKAN%%lib/libggml-vulkan.so +%%VULKAN%%lib/libggml-vulkan.so.0 +%%VULKAN%%lib/libggml-vulkan.so.0.9.7 lib/libggml.so lib/libggml.so.0 -lib/libggml.so.0.9.5 +lib/libggml.so.0.9.7 lib/libllama.so lib/libllama.so.0 -lib/libllama.so.0.0.7709 +lib/libllama.so.0.0.8132 lib/libmtmd.so lib/libmtmd.so.0 -lib/libmtmd.so.0.0.7709 +lib/libmtmd.so.0.0.8132 libdata/pkgconfig/llama.pc