diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile
index 3cabec77cc9a..adc5ba1cb67f 100644
--- a/misc/llama-cpp/Makefile
+++ b/misc/llama-cpp/Makefile
@@ -1,36 +1,36 @@
 PORTNAME=	llama-cpp
 DISTVERSIONPREFIX=	b
-DISTVERSION=	2144
+DISTVERSION=	2167
 CATEGORIES=	misc # machine-learning
 
 MAINTAINER=	yuri@FreeBSD.org
 COMMENT=	Facebook's LLaMA model in C/C++
 WWW=		https://github.com/ggerganov/llama.cpp
 
 LICENSE=	MIT
 LICENSE_FILE=	${WRKSRC}/LICENSE
 
-USES=		cmake:testing python:run shebangfix
+USES=		cmake:testing compiler:c++11-lang python:run shebangfix
 USE_LDCONFIG=	yes
 
 USE_GITHUB=	yes
 GH_ACCOUNT=	ggerganov
 GH_PROJECT=	llama.cpp
 GH_TUPLE=	nomic-ai:kompute:4565194:kompute/kompute
 
 SHEBANG_GLOB=	*.py
 
 CMAKE_ON=	BUILD_SHARED_LIBS
 CMAKE_OFF=	LLAMA_BUILD_TESTS
 CMAKE_TESTING_ON=	LLAMA_BUILD_TESTS
 
 LDFLAGS+=	-pthread
 
 OPTIONS_DEFINE=	EXAMPLES
 OPTIONS_SUB=	yes
 EXAMPLES_CMAKE_BOOL=	LLAMA_BUILD_EXAMPLES
 
 BINARY_ALIAS=	git=false
 
 
 .include <bsd.port.mk>
diff --git a/misc/llama-cpp/distinfo b/misc/llama-cpp/distinfo
index b11d5d4b173e..a420832ab703 100644
--- a/misc/llama-cpp/distinfo
+++ b/misc/llama-cpp/distinfo
@@ -1,5 +1,5 @@
-TIMESTAMP = 1707995361
-SHA256 (ggerganov-llama.cpp-b2144_GH0.tar.gz) = 679d2deb41b9df3d04bc5fb3b8fd255717009a08927962eca6476f26bff74731
-SIZE (ggerganov-llama.cpp-b2144_GH0.tar.gz) = 8562099
+TIMESTAMP = 1708158661
+SHA256 (ggerganov-llama.cpp-b2167_GH0.tar.gz) = bc6f0d0a17adee4f530038d7d5c92b5b846ad11c3e0e899e9e44e6523c66553c
+SIZE (ggerganov-llama.cpp-b2167_GH0.tar.gz) = 8582970
 SHA256 (nomic-ai-kompute-4565194_GH0.tar.gz) = 95b52d2f0514c5201c7838348a9c3c9e60902ea3c6c9aa862193a212150b2bfc
 SIZE (nomic-ai-kompute-4565194_GH0.tar.gz) = 13540496
diff --git a/misc/llama-cpp/pkg-plist b/misc/llama-cpp/pkg-plist
index ced1b9e1d19c..804a752c97d4 100644
--- a/misc/llama-cpp/pkg-plist
+++ b/misc/llama-cpp/pkg-plist
@@ -1,38 +1,39 @@
 %%EXAMPLES%%bin/baby-llama
 %%EXAMPLES%%bin/batched
 %%EXAMPLES%%bin/batched-bench
 %%EXAMPLES%%bin/beam-search
 %%EXAMPLES%%bin/benchmark
 %%EXAMPLES%%bin/convert-llama2c-to-ggml
 %%EXAMPLES%%bin/convert-lora-to-ggml.py
 %%EXAMPLES%%bin/convert.py
 %%EXAMPLES%%bin/embedding
 %%EXAMPLES%%bin/export-lora
 %%EXAMPLES%%bin/finetune
+%%EXAMPLES%%bin/gguf
 %%EXAMPLES%%bin/imatrix
 %%EXAMPLES%%bin/infill
 %%EXAMPLES%%bin/llama-bench
 %%EXAMPLES%%bin/llava-cli
 %%EXAMPLES%%bin/lookahead
 %%EXAMPLES%%bin/lookup
 %%EXAMPLES%%bin/main
 %%EXAMPLES%%bin/parallel
 %%EXAMPLES%%bin/passkey
 %%EXAMPLES%%bin/perplexity
 %%EXAMPLES%%bin/quantize
 %%EXAMPLES%%bin/quantize-stats
 %%EXAMPLES%%bin/save-load-state
 %%EXAMPLES%%bin/server
 %%EXAMPLES%%bin/simple
 %%EXAMPLES%%bin/speculative
 %%EXAMPLES%%bin/tokenize
 %%EXAMPLES%%bin/train-text-from-scratch
 include/ggml-alloc.h
 include/ggml-backend.h
 include/ggml.h
 include/llama.h
 lib/cmake/Llama/LlamaConfig.cmake
 lib/cmake/Llama/LlamaConfigVersion.cmake
 lib/libggml_shared.so
 lib/libllama.so
 %%EXAMPLES%%lib/libllava_shared.so