diff --git a/misc/llama-cpp/Makefile b/misc/llama-cpp/Makefile
index 1eeab118f76e..ed8f01264362 100644
--- a/misc/llama-cpp/Makefile
+++ b/misc/llama-cpp/Makefile
@@ -1,55 +1,55 @@
 PORTNAME=	llama-cpp
 DISTVERSIONPREFIX=	b
-DISTVERSION=	4409
+DISTVERSION=	4418
 CATEGORIES=	misc # machine-learning
 
 MAINTAINER=	yuri@FreeBSD.org
 COMMENT=	Facebook's LLaMA model in C/C++ # '
 WWW=		https://github.com/ggerganov/llama.cpp
 
 LICENSE=	MIT
 LICENSE_FILE=	${WRKSRC}/LICENSE
 
 BROKEN_armv7=	clang crashes, see https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=278810
 BROKEN_i386=	compilation fails, see https://github.com/ggerganov/llama.cpp/issues/9545
 
 USES=		cmake:testing compiler:c++11-lang python:run shebangfix
 USE_LDCONFIG=	yes
 
 USE_GITHUB=	yes
 GH_ACCOUNT=	ggerganov
 GH_PROJECT=	llama.cpp
 GH_TUPLE=	nomic-ai:kompute:4565194:kompute/kompute
 
 SHEBANG_GLOB=	*.py
 
 CMAKE_ON=	BUILD_SHARED_LIBS
 CMAKE_OFF=	LLAMA_BUILD_TESTS
 CMAKE_TESTING_ON=	LLAMA_BUILD_TESTS
 
 OPTIONS_DEFINE=	CURL EXAMPLES VULKAN
 OPTIONS_DEFAULT=	CURL VULKAN
 OPTIONS_SUB=	yes
 
 CURL_DESCR=	Use libcurl to download model from an URL
 CURL_CMAKE_BOOL=	LLAMA_CURL
 CURL_USES=	localbase
 CURL_LIB_DEPENDS=	libcurl.so:ftp/curl
 
 EXAMPLES_CMAKE_BOOL=	LLAMA_BUILD_EXAMPLES
 
 VULKAN_DESC=	Vulkan GPU offload support
 VULKAN_CMAKE_BOOL=	GGML_VULKAN
 VULKAN_BUILD_DEPENDS=	glslc:graphics/shaderc \
 			vulkan-headers>0:graphics/vulkan-headers
 VULKAN_LIB_DEPENDS=	libvulkan.so:graphics/vulkan-loader
 
 BINARY_ALIAS=	git=false
 
 do-test-ci: # build of tests fails, see https://github.com/ggerganov/llama.cpp/issues/10955
 	@cd ${WRKSRC} && \
 		${SETENV} ${MAKE_ENV} bash ci/run.sh ./tmp/results ./tmp/mnt
 
 # tests as of 4404: 97% tests passed, 1 tests failed out of 31, see https://github.com/ggerganov/llama.cpp/issues/11036
 
 .include <bsd.port.mk>
diff --git a/misc/llama-cpp/distinfo b/misc/llama-cpp/distinfo
index 1077df5ceb17..45282e0c4897 100644
--- a/misc/llama-cpp/distinfo
+++ b/misc/llama-cpp/distinfo
@@ -1,5 +1,5 @@
-TIMESTAMP = 1735962918
-SHA256 (ggerganov-llama.cpp-b4409_GH0.tar.gz) = 71a62315c73b1f00bdd5fdec7d16a77a0d6e1150171953fde230f04e7d5adfa1
-SIZE (ggerganov-llama.cpp-b4409_GH0.tar.gz) = 20608272
+TIMESTAMP = 1736116712
+SHA256 (ggerganov-llama.cpp-b4418_GH0.tar.gz) = b1d13215cbdb076ead3ed86043edc5c96999a204b95ad778fce8cd229d4d5427
+SIZE (ggerganov-llama.cpp-b4418_GH0.tar.gz) = 20608757
 SHA256 (nomic-ai-kompute-4565194_GH0.tar.gz) = 95b52d2f0514c5201c7838348a9c3c9e60902ea3c6c9aa862193a212150b2bfc
 SIZE (nomic-ai-kompute-4565194_GH0.tar.gz) = 13540496
diff --git a/misc/llama-cpp/files/patch-ggml_src_CMakeLists.txt b/misc/llama-cpp/files/patch-ggml_src_CMakeLists.txt
new file mode 100644
index 000000000000..8135824e1b8b
--- /dev/null
+++ b/misc/llama-cpp/files/patch-ggml_src_CMakeLists.txt
@@ -0,0 +1,20 @@
+- workaround for https://github.com/ggerganov/llama.cpp/issues/11095
+
+--- ggml/src/CMakeLists.txt.orig	2025-01-06 00:37:35 UTC
++++ ggml/src/CMakeLists.txt
+@@ -152,15 +152,6 @@ endif()
+ # posix_memalign came in POSIX.1-2001 / SUSv3
+ # M_PI is an XSI extension since POSIX.1-2001 / SUSv3, came in XPG1 (1985)
+ 
+-# Somehow in OpenBSD whenever POSIX conformance is specified
+-# some string functions rely on locale_t availability,
+-# which was introduced in POSIX.1-2008, forcing us to go higher
+-if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD")
+-    add_compile_definitions(_XOPEN_SOURCE=700)
+-else()
+-    add_compile_definitions(_XOPEN_SOURCE=600)
+-endif()
+-
+ # Data types, macros and functions related to controlling CPU affinity and
+ # some memory allocation are available on Linux through GNU extensions in libc
+ if (CMAKE_SYSTEM_NAME MATCHES "Linux" OR CMAKE_SYSTEM_NAME MATCHES "Android")
diff --git a/misc/llama-cpp/pkg-message b/misc/llama-cpp/pkg-message
new file mode 100644
index 000000000000..071e82665d9a
--- /dev/null
+++ b/misc/llama-cpp/pkg-message
@@ -0,0 +1,17 @@
+[
+{ type: install
+  message: <