refactor: remove libcurl, use OpenSSL when available (#18828)

This commit is contained in:
Adrien Gallouët 2026-01-14 18:02:47 +01:00 committed by GitHub
parent 3e4bb29666
commit 516a4ca9b5
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
36 changed files with 74 additions and 500 deletions

View file

@ -109,8 +109,7 @@ rm -rf "$build_dir" && mkdir "$build_dir" || abort "Failed to make $build_dir"
# Step 2: Setup Build Environment and Compile Test Binaries
###########################################################
# Note: test-eval-callback requires -DLLAMA_CURL
cmake -B "./$build_dir" -DCMAKE_BUILD_TYPE=Debug -DGGML_CUDA=1 -DLLAMA_CURL=1 || abort "Failed to build environment"
cmake -B "./$build_dir" -DCMAKE_BUILD_TYPE=Debug -DGGML_CUDA=1 || abort "Failed to build environment"
pushd "$build_dir"
make -j || abort "Failed to compile"
popd > /dev/null || exit 1

View file

@ -4,7 +4,7 @@ const path = require('path');
// This file is used for testing wasm build from emscripten
// Example build command:
// emcmake cmake -B build-wasm -DGGML_WEBGPU=ON -DLLAMA_CURL=OFF
// emcmake cmake -B build-wasm -DGGML_WEBGPU=ON -DLLAMA_OPENSSL=OFF
// cmake --build build-wasm --target test-backend-ops -j
const PORT = 8080;

View file

@ -7,7 +7,7 @@
Simple usage example:
cmake -B build -DLLAMA_CURL=1 && cmake --build build --config Release -j -t llama-server
cmake -B build && cmake --build build --config Release -j -t llama-server
export LLAMA_SERVER_BIN_PATH=$PWD/build/bin/llama-server
export LLAMA_CACHE=${LLAMA_CACHE:-$HOME/Library/Caches/llama.cpp}