From 1d660d2fae42ea2e1d3569638e722bf7a37b6b19 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 22 Sep 2025 09:11:39 +0300 Subject: [PATCH 01/12] ci : use smaller model (#16168) * ci : switch from gemma to qwen3 0.6b * ci : use smaller model for some tests --- ci/run.sh | 35 +++++------------------------------ tools/gguf-split/tests.sh | 18 +++++++++--------- tools/quantize/tests.sh | 12 ++++++------ 3 files changed, 20 insertions(+), 45 deletions(-) diff --git a/ci/run.sh b/ci/run.sh index 09417ef61..cb90f7a78 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -210,33 +210,9 @@ function gg_sum_ctest_release { gg_printf '```\n' } -# test_scripts_debug +# test_scripts -function gg_run_test_scripts_debug { - cd ${SRC} - - set -e - - (cd ./tools/gguf-split && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log - (cd ./tools/quantize && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log - - set +e -} - -function gg_sum_test_scripts_debug { - gg_printf '### %s\n\n' "${ci}" - - gg_printf 'Runs test scripts in debug mode\n' - gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)" - gg_printf '```\n' - gg_printf '%s\n' "$(cat $OUT/${ci}-scripts.log)" - gg_printf '```\n' - gg_printf '\n' -} - -# test_scripts_release - -function gg_run_test_scripts_release { +function gg_run_test_scripts { cd ${SRC} set -e @@ -247,10 +223,10 @@ function gg_run_test_scripts_release { set +e } -function gg_sum_test_scripts_release { +function gg_sum_test_scripts { gg_printf '### %s\n\n' "${ci}" - gg_printf 'Runs test scripts in release mode\n' + gg_printf 'Runs test scripts\n' gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)" gg_printf '```\n' gg_printf '%s\n' "$(cat $OUT/${ci}-scripts.log)" @@ -627,8 +603,7 @@ if [ -z ${GG_BUILD_LOW_PERF} ]; then test $ret -eq 0 && gg_run rerank_tiny if [ -z ${GG_BUILD_CLOUD} ] || [ ${GG_BUILD_EXTRA_TESTS_0} ]; then - test $ret -eq 0 && gg_run test_scripts_debug - test $ret -eq 0 && gg_run test_scripts_release + test $ret -eq 0 && gg_run test_scripts fi test $ret -eq 0 && gg_run qwen3_0_6b diff --git a/tools/gguf-split/tests.sh b/tools/gguf-split/tests.sh index c9ad85da0..e8677018f 100755 --- a/tools/gguf-split/tests.sh +++ b/tools/gguf-split/tests.sh @@ -31,27 +31,27 @@ rm -f $WORK_PATH/ggml-model-split*.gguf $WORK_PATH/ggml-model-merge*.gguf # 1. Get a model ( cd $WORK_PATH -"$ROOT_DIR"/scripts/hf.sh --repo ggml-org/gemma-1.1-2b-it-Q8_0-GGUF --file gemma-1.1-2b-it.Q8_0.gguf +"$ROOT_DIR"/scripts/hf.sh --repo ggml-org/Qwen3-0.6B-GGUF --file Qwen3-0.6B-Q8_0.gguf ) echo PASS # 2. Split with max tensors strategy -$SPLIT --split-max-tensors 28 $WORK_PATH/gemma-1.1-2b-it.Q8_0.gguf $WORK_PATH/ggml-model-split +$SPLIT --split-max-tensors 28 $WORK_PATH/Qwen3-0.6B-Q8_0.gguf $WORK_PATH/ggml-model-split echo PASS echo # 2b. Test the sharded model is loading properly -$MAIN -no-cnv --model $WORK_PATH/ggml-model-split-00001-of-00006.gguf --n-predict 32 +$MAIN -no-cnv --model $WORK_PATH/ggml-model-split-00001-of-00012.gguf -p "I believe the meaning of life is" --n-predict 32 echo PASS echo # 3. Merge -$SPLIT --merge $WORK_PATH/ggml-model-split-00001-of-00006.gguf $WORK_PATH/ggml-model-merge.gguf +$SPLIT --merge $WORK_PATH/ggml-model-split-00001-of-00012.gguf $WORK_PATH/ggml-model-merge.gguf echo PASS echo # 3b. 
Test the merged model is loading properly -$MAIN -no-cnv --model $WORK_PATH/ggml-model-merge.gguf --n-predict 32 +$MAIN -no-cnv --model $WORK_PATH/ggml-model-merge.gguf -p "I believe the meaning of life is" --n-predict 32 echo PASS echo @@ -61,12 +61,12 @@ echo PASS echo # 4b. Test the sharded model is loading properly -$MAIN -no-cnv --model $WORK_PATH/ggml-model-split-32-tensors-00001-of-00007.gguf --n-predict 32 +$MAIN -no-cnv --model $WORK_PATH/ggml-model-split-32-tensors-00001-of-00011.gguf -p "I believe the meaning of life is" --n-predict 32 echo PASS echo # 5. Merge -#$SPLIT --merge $WORK_PATH/ggml-model-split-32-tensors-00001-of-00006.gguf $WORK_PATH/ggml-model-merge-2.gguf +#$SPLIT --merge $WORK_PATH/ggml-model-split-32-tensors-00001-of-00012.gguf $WORK_PATH/ggml-model-merge-2.gguf #echo PASS #echo @@ -76,12 +76,12 @@ echo #echo # 6. Split with size strategy -$SPLIT --split-max-size 2G $WORK_PATH/ggml-model-merge.gguf $WORK_PATH/ggml-model-split-2G +$SPLIT --split-max-size 500M $WORK_PATH/ggml-model-merge.gguf $WORK_PATH/ggml-model-split-500M echo PASS echo # 6b. Test the sharded model is loading properly -$MAIN -no-cnv --model $WORK_PATH/ggml-model-split-2G-00001-of-00002.gguf --n-predict 32 +$MAIN -no-cnv --model $WORK_PATH/ggml-model-split-500M-00001-of-00002.gguf -p "I believe the meaning of life is" --n-predict 32 echo PASS echo diff --git a/tools/quantize/tests.sh b/tools/quantize/tests.sh index ba9616148..acc54fd9b 100644 --- a/tools/quantize/tests.sh +++ b/tools/quantize/tests.sh @@ -32,32 +32,32 @@ rm -f $WORK_PATH/ggml-model-split*.gguf $WORK_PATH/ggml-model-requant*.gguf # 1. Get a model ( cd $WORK_PATH -"$ROOT_DIR"/scripts/hf.sh --repo ggml-org/gemma-1.1-2b-it-Q8_0-GGUF --file gemma-1.1-2b-it.Q8_0.gguf +"$ROOT_DIR"/scripts/hf.sh --repo ggml-org/Qwen3-0.6B-GGUF --file Qwen3-0.6B-Q8_0.gguf ) echo PASS # 2. Split model -$SPLIT --split-max-tensors 28 $WORK_PATH/gemma-1.1-2b-it.Q8_0.gguf $WORK_PATH/ggml-model-split +$SPLIT --split-max-tensors 28 $WORK_PATH/Qwen3-0.6B-Q8_0.gguf $WORK_PATH/ggml-model-split echo PASS echo # 3. Requant model with '--keep-split' -$QUANTIZE --allow-requantize --keep-split $WORK_PATH/ggml-model-split-00001-of-00006.gguf $WORK_PATH/ggml-model-requant.gguf Q4_K +$QUANTIZE --allow-requantize --keep-split $WORK_PATH/ggml-model-split-00001-of-00012.gguf $WORK_PATH/ggml-model-requant.gguf Q4_K echo PASS echo # 3a. Test the requanted model is loading properly -$MAIN -no-cnv --model $WORK_PATH/ggml-model-requant-00001-of-00006.gguf --n-predict 32 +$MAIN -no-cnv --model $WORK_PATH/ggml-model-requant-00001-of-00012.gguf -p "I believe the meaning of life is" --n-predict 32 echo PASS echo # 4. Requant mode without '--keep-split' -$QUANTIZE --allow-requantize $WORK_PATH/ggml-model-split-00001-of-00006.gguf $WORK_PATH/ggml-model-requant-merge.gguf Q4_K +$QUANTIZE --allow-requantize $WORK_PATH/ggml-model-split-00001-of-00012.gguf $WORK_PATH/ggml-model-requant-merge.gguf Q4_K echo PASS echo # 4b. 
Test the requanted model is loading properly -$MAIN -no-cnv --model $WORK_PATH/ggml-model-requant-merge.gguf --n-predict 32 +$MAIN -no-cnv --model $WORK_PATH/ggml-model-requant-merge.gguf -p "I believe the meaning of life is" --n-predict 32 echo PASS echo From ec65fb52f0cc890e5085a6c5995ab08a156265fb Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 22 Sep 2025 10:16:05 +0300 Subject: [PATCH 02/12] ci : remove vulkaninfo calls (#16169) --- .github/workflows/build.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a7efce007..afdcdd07b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1396,7 +1396,6 @@ jobs: - name: Test id: ggml-ci run: | - vulkaninfo GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp ggml-ci-x64-amd-v710-rocm: @@ -1410,7 +1409,6 @@ jobs: - name: Test id: ggml-ci run: | - vulkaninfo GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp ggml-ci-mac-metal: From 5c6106a696af2b00ce01d14ba525979d5a32b199 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 22 Sep 2025 10:58:02 +0300 Subject: [PATCH 03/12] contrib : update roles (#16113) * contrib : update roles * contrib : merge PR sections + add link to CI instructions Updated pull request guidelines for contributors and collaborators, and clarified merging practices for maintainers. --- CONTRIBUTING.md | 34 +++++++++++++++++++++++++++++----- README.md | 2 +- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6db74a0cf..16c3f1c40 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,12 @@ -# Pull requests (for contributors) +# Contributors + +The project differentiates between 3 levels of contributors: + +- Contributors: people who have contributed before (no special privileges) +- Collaborators (Triage): people with significant contributions, who may be responsible for some parts of the code, and are expected to maintain and review contributions for the code they own +- Maintainers: responsible for reviewing and merging PRs, after approval from the code owners + +# Pull requests (for contributors & collaborators) - llama.cpp uses the ggml tensor library for model evaluation. If you are unfamiliar with ggml, consider taking a look at the [examples in the ggml repository](https://github.com/ggml-org/ggml/tree/master/examples/). [simple](https://github.com/ggml-org/ggml/tree/master/examples/simple) shows the bare minimum for using ggml. [gpt-2](https://github.com/ggml-org/ggml/tree/master/examples/gpt-2) has minimal implementations for language model inference using GPT-2. [mnist](https://github.com/ggml-org/ggml/tree/master/examples/mnist) demonstrates how to train and evaluate a simple image classifier - Test your changes: @@ -9,15 +17,16 @@ - Create separate PRs for each feature or fix. Avoid combining unrelated changes in a single PR - Consider allowing write access to your branch for faster reviews, as reviewers can push commits directly - If your PR becomes stale, don't hesitate to ping the maintainers in the comments +- Maintainers will rely on your insights and approval when making a final decision to approve and merge a PR +- Consider adding yourself to [CODEOWNERS](CODEOWNERS) to indicate your availability for reviewing related PRs -# Pull requests (for collaborators) +# Pull requests (for maintainers) - Squash-merge PRs - Use the following format for the squashed commit title: ` : (#)`. 
For example: `utils : fix typo in utils.py (#1234)` - Optionally pick a `` from here: https://github.com/ggml-org/llama.cpp/wiki/Modules -- Consider adding yourself to [CODEOWNERS](CODEOWNERS) -- Let authors, who are also collaborators, merge their own PRs -- When merging a PR by a contributor, make sure you have a good understanding of the changes +- Let other maintainers, merge their own PRs +- When merging a PR, make sure you have a good understanding of the changes - Be mindful of maintenance: most of the work going into a feature happens after the PR is merged. If the PR author is not committed to contribute long-term, someone else needs to take responsibility (you) # Coding guidelines @@ -117,6 +126,21 @@ #endif // FOO ``` +# Code maintenance + +- Existing code should have designated collaborators and/or maintainers specified in the [CODEOWNERS](CODEOWNERS) file reponsible for: + - Reviewing and merging related PRs + - Fixing related bugs + - Providing developer guidance/support + +- When adding or modifying a large piece of code: + - If you are a collaborator, make sure to add yourself to [CODEOWNERS](CODEOWNERS) to indicate your availability for reviewing related PRs + - If you are a contributor, find an existing collaborator who is willing to review and maintain your code long-term + - Provide the necessary CI workflow (and hardware) to test your changes (see [ci/README.md](https://github.com/ggml-org/llama.cpp/tree/master/ci)) + +- New code should follow the guidelines (coding, naming, etc.) outlined in this document. Exceptions are allowed in isolated, backend-specific parts of the code that do not interface directly with the `ggml` interfaces. + _(NOTE: for legacy reasons, existing code is not required to follow this guideline)_ + # Documentation - Documentation is a community effort diff --git a/README.md b/README.md index 17f59e988..fcd7014a2 100644 --- a/README.md +++ b/README.md @@ -520,8 +520,8 @@ To learn more about model quantization, [read this documentation](tools/quantize ## Contributing - Contributors can open PRs -- Collaborators can push to branches in the `llama.cpp` repo and merge PRs into the `master` branch - Collaborators will be invited based on contributions +- Maintainers can push to branches in the `llama.cpp` repo and merge PRs into the `master` branch - Any help with managing issues, PRs and projects is very appreciated! 
- See [good first issues](https://github.com/ggml-org/llama.cpp/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) for tasks suitable for first contributions
- Read the [CONTRIBUTING.md](CONTRIBUTING.md) for more information
From b2d980fce063fa3591c59ad50b946c8da66c294f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?=
Date: Mon, 22 Sep 2025 09:59:05 +0200
Subject: [PATCH 04/12] codeowners : claim responsibility for ci, models, gguf-py and convert (#16124)
* claim responsibility for ci, gguf-py and convert
* add myself to various src/llama- files
---
CODEOWNERS | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/CODEOWNERS b/CODEOWNERS
index 18564a08b..e14d37861 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -10,3 +10,12 @@
/ggml/src/gguf.cpp @JohannesGaessler
/ggml/src/ggml-vulkan/ @0cc4m
/ggml/src/ggml-zdnn/ @taronaeo
+/.github/workflows/ @CISC
+/src/llama-adapter.* @CISC
+/src/llama-arch.* @CISC
+/src/llama-graph.* @CISC
+/src/llama-model.* @CISC
+/src/llama-vocab.* @CISC
+/gguf-py/ @CISC
+/convert_*.py @CISC
+requirements*.txt @CISC
From 96fdca043be8fa733bbd59868a75517f92633376 Mon Sep 17 00:00:00 2001
From: Shin-myoung-serp
Date: Mon, 22 Sep 2025 17:04:01 +0900
Subject: [PATCH 05/12] Vulkan: add conv_transpose_2d operation (#16022)
* Vulkan: add conv_transpose_2d operation
* Vulkan: fix typo in conv_transpose_2d shader(s0mp, s0L, s1mp, s1L)
* Vulkan: fix incorrect indentation in conv_transpose_2d shader
* Vulkan: add checking the push constants size limit and reuse conv2d_mm.comp for conv_transpose_2d operation
* Vulkan: revert the order of the index calculation and bound check in conv_2d shader
* Vulkan: explicitly check push constants limit in supports_op() for conv_transpose_2d operation.
* Vulkan: remove unnecessary lower bound checks for H/W_idx in the conv_2d shader.
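
The core of this change is the output-size relation implemented by the new ggml_vk_get_conv_transpose_2d_elements() helper further down; as a quick reference, a minimal sketch of that formula (assuming the zero-padding, unit-dilation ggml_conv_transpose_2d_p0 variant this patch targets; the helper name conv_transpose_2d_out_size is illustrative only):

#include <cstdint>

// Output extent of a transposed convolution along one dimension
// (in = input size, k = kernel size, s = stride, p = padding, d = dilation).
static int64_t conv_transpose_2d_out_size(int64_t in, int64_t k, int s, int p, int d) {
    return (in - 1) * s - 2 * p + (k - 1) * d + 1;
}

// Example: a 10x10 input with a 3x3 kernel and stride 2 (p = 0, d = 1)
// yields a 21x21 output: (10 - 1) * 2 + (3 - 1) + 1 = 21.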
--- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 213 +++++++++++++++--- .../ggml-vulkan/vulkan-shaders/conv2d_mm.comp | 24 +- .../vulkan-shaders/vulkan-shaders-gen.cpp | 26 ++- tests/test-backend-ops.cpp | 4 + 4 files changed, 229 insertions(+), 38 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 5818f938e..0feaf4cb5 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -574,6 +574,8 @@ struct vk_device_struct { vk_pipeline pipeline_opt_step_sgd_f32; vk_pipeline pipeline_conv2d_f32[CONV_SHAPE_COUNT]; vk_pipeline pipeline_conv2d_f16_f32[CONV_SHAPE_COUNT]; + vk_pipeline pipeline_conv_transpose_2d_f32[CONV_SHAPE_COUNT]; + vk_pipeline pipeline_conv_transpose_2d_f16_f32[CONV_SHAPE_COUNT]; vk_pipeline pipeline_conv2d_dw_whcn_f32, pipeline_conv2d_dw_whcn_f16_f32; vk_pipeline pipeline_conv2d_dw_cwhn_f32, pipeline_conv2d_dw_cwhn_f16_f32; @@ -1117,6 +1119,56 @@ template <> void init_pushconst_fastdiv(vk_op_conv2d_push_constants &p) { init_fastdiv_values(p.OW*p.OH, p.OWOHmp, p.OWOHL); } +struct vk_op_conv_transpose_2d_push_constants { + uint32_t Cout; + uint32_t Cin; + uint32_t N; + + uint32_t KW; + uint32_t KH; + uint32_t W; + uint32_t H; + uint32_t OW; + uint32_t OH; + + uint32_t s0; + uint32_t s1; + uint32_t p0; + uint32_t p1; + uint32_t d0; + uint32_t d1; + + uint32_t nb01; + uint32_t nb02; + uint32_t nb03; + + uint32_t nb11; + uint32_t nb12; + uint32_t nb13; + + uint32_t nb1; + uint32_t nb2; + uint32_t nb3; + + // init_fastdiv_values constants for dividing by KW, KW*KH, OW, OW*OH, s0, s1 + uint32_t KWmp; uint32_t KWL; + uint32_t KWKHmp; uint32_t KWKHL; + uint32_t OWmp; uint32_t OWL; + uint32_t OWOHmp; uint32_t OWOHL; + uint32_t s0mp; uint32_t s0L; + uint32_t s1mp; uint32_t s1L; +}; + +template <> void init_pushconst_fastdiv(vk_op_conv_transpose_2d_push_constants &p) { + // Compute magic values to divide by KW, KW*KH, OW, OW*OH, s0, s1 + init_fastdiv_values(p.KW, p.KWmp, p.KWL); + init_fastdiv_values(p.KW*p.KH, p.KWKHmp, p.KWKHL); + init_fastdiv_values(p.OW, p.OWmp, p.OWL); + init_fastdiv_values(p.OW*p.OH, p.OWOHmp, p.OWOHL); + init_fastdiv_values(p.s0, p.s0mp, p.s0L); + init_fastdiv_values(p.s1, p.s1mp, p.s1L); +} + struct vk_op_conv2d_dw_push_constants { uint32_t ne; uint32_t batches; @@ -1322,7 +1374,7 @@ class vk_perf_logger { flops[name].push_back(m * n * (k + (k - 1)) * batch); return; } - if (node->op == GGML_OP_CONV_2D) { + if (node->op == GGML_OP_CONV_2D || node->op == GGML_OP_CONV_TRANSPOSE_2D) { std::string name = ggml_op_name(node->op); ggml_tensor * knl = node->src[0]; uint64_t OW = node->ne[0]; @@ -1331,7 +1383,7 @@ class vk_perf_logger { uint64_t Cout = node->ne[2]; uint64_t KW = knl->ne[0]; uint64_t KH = knl->ne[1]; - uint64_t Cin = knl->ne[2]; + uint64_t Cin = node->src[1]->ne[2]; // KxCRS @ CRSxNPQ = KxNPQ -> M=K, K=CRS, N=NPQ uint64_t size_M = Cout; uint64_t size_K = Cin * KW * KH; @@ -3492,7 +3544,7 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_opt_step_sgd_f32, "opt_step_sgd_f32", opt_step_sgd_f32_len, opt_step_sgd_f32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); - // conv2d + // conv2d, conv_transpose_2d for (uint32_t s = 0; s < CONV_SHAPE_COUNT; ++s) { uint32_t conv2d_WG_SIZE = 256; uint32_t conv2d_BS_K = 128; @@ -3567,31 +3619,30 @@ static void ggml_vk_load_shaders(vk_device& device) { std::array wg_denoms = { conv2d_BS_K, conv2d_BS_NPQ, 1 }; std::vector spec_constants = { conv2d_WG_SIZE, 
conv2d_BS_K, conv2d_BS_CRS, conv2d_BS_NPQ, conv2d_TS_K, use_collectives, conv2d_SHMEM_PAD }; +#define CREATE_CONV(name, type_suffix, spv_suffix) \ + ggml_vk_create_pipeline( \ + device, device->pipeline_##name##type_suffix[s], #name #type_suffix, \ + name##type_suffix##spv_suffix##_len, name##type_suffix##spv_suffix##_data, "main", 3, \ + sizeof(vk_op_##name##_push_constants), wg_denoms, spec_constants, 1, true, use_collectives); +#define CREATE_CONVS(spv_suffix) \ + CREATE_CONV(conv2d, _f32, spv_suffix) \ + CREATE_CONV(conv2d, _f16_f32, spv_suffix) \ + if (device->properties.limits.maxPushConstantsSize >= sizeof(vk_op_conv_transpose_2d_push_constants)) { \ + CREATE_CONV(conv_transpose_2d, _f32, spv_suffix) \ + CREATE_CONV(conv_transpose_2d, _f16_f32, spv_suffix) \ + } #if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT) if (device->coopmat2) { - ggml_vk_create_pipeline( - device, device->pipeline_conv2d_f32[s], "conv2d_f32", conv2d_f32_cm2_len, conv2d_f32_cm2_data, "main", 3, - sizeof(vk_op_conv2d_push_constants), wg_denoms, spec_constants, 1, true, use_collectives); - ggml_vk_create_pipeline( - device, device->pipeline_conv2d_f16_f32[s], "conv2d_f16_f32", conv2d_f16_f32_cm2_len, conv2d_f16_f32_cm2_data, "main", 3, - sizeof(vk_op_conv2d_push_constants), wg_denoms, spec_constants, 1, true, use_collectives); + CREATE_CONVS(_cm2) } else #endif if (conv2d_UNROLL) { - ggml_vk_create_pipeline( - device, device->pipeline_conv2d_f32[s], "conv2d_f32", conv2d_f32_unroll_len, conv2d_f32_unroll_data, "main", 3, - sizeof(vk_op_conv2d_push_constants), wg_denoms, spec_constants, 1, true, use_collectives); - ggml_vk_create_pipeline( - device, device->pipeline_conv2d_f16_f32[s], "conv2d_f16_f32", conv2d_f16_f32_unroll_len, conv2d_f16_f32_unroll_data, "main", 3, - sizeof(vk_op_conv2d_push_constants), wg_denoms, spec_constants, 1, true, use_collectives); + CREATE_CONVS(_unroll) } else { - ggml_vk_create_pipeline( - device, device->pipeline_conv2d_f32[s], "conv2d_f32", conv2d_f32_len, conv2d_f32_data, "main", 3, - sizeof(vk_op_conv2d_push_constants), wg_denoms, spec_constants, 1, true, use_collectives); - ggml_vk_create_pipeline( - device, device->pipeline_conv2d_f16_f32[s], "conv2d_f16_f32", conv2d_f16_f32_len, conv2d_f16_f32_data, "main", 3, - sizeof(vk_op_conv2d_push_constants), wg_denoms, spec_constants, 1, true, use_collectives); + CREATE_CONVS( ) } +#undef CREATE_CONV +#undef CREATE_CONVS } ggml_vk_create_pipeline(device, device->pipeline_conv2d_dw_whcn_f32, "conv2d_dw_whcn_f32", conv2d_dw_whcn_f32_len, conv2d_dw_whcn_f32_data, "main", 3, sizeof(vk_op_conv2d_dw_push_constants), {512, 1, 1}, {}, 1); @@ -7548,6 +7599,33 @@ static std::array ggml_vk_get_conv_elements(const ggml_tensor *dst) return elements; } +static std::array ggml_vk_get_conv_transpose_2d_elements(const ggml_tensor *dst) { + const ggml_tensor *src0 = dst->src[0]; + const ggml_tensor *src1 = dst->src[1]; + + // src0 - kernel: [KW, KH, Cout, Cin] + // src1 - input: [W, H, Cin, N] + // dst - result: [OW, OH, Cout, N] + + auto calc_conv_output_size = [](int64_t ins, int64_t ks, int s, int p, int d) -> int64_t { + return (ins - 1) * s - 2 * p + (ks - 1) * d + 1; + }; + // parallelize in {OW/BS_K, OH/BS_NPQ, 1} + int64_t W = src1->ne[0]; + int64_t H = src1->ne[1]; + int64_t KW = src0->ne[0]; + int64_t KH = src0->ne[1]; + int64_t Cout = src0->ne[2]; + int64_t N = src1->ne[3]; + int64_t OH = calc_conv_output_size(H, KH, dst->op_params[0], 0, 1); + int64_t OW = calc_conv_output_size(W, KW, dst->op_params[0], 0, 1); + int64_t NPQ = N * OW * OH; + + // 
Tile output matrix to (K/NB_K, NPQ/NB_NPQ, 1) workgroups + std::array elements = { static_cast(Cout), static_cast(NPQ), 1 }; + return elements; +} + static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * dst, ggml_op op) { switch (op) { case GGML_OP_GET_ROWS: @@ -7925,9 +8003,12 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const } return nullptr; case GGML_OP_CONV_2D: + case GGML_OP_CONV_TRANSPOSE_2D: if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 && ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && ggml_is_contiguous(dst)) { - auto elements = ggml_vk_get_conv_elements(dst); + std::array elements; + if (op == GGML_OP_CONV_2D) elements = ggml_vk_get_conv_elements(dst); + else if (op == GGML_OP_CONV_TRANSPOSE_2D) elements = ggml_vk_get_conv_transpose_2d_elements(dst); vk_conv_shapes shape; uint32_t tiles[CONV_SHAPE_COUNT]; @@ -7947,10 +8028,18 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const shape = CONV_SHAPE_64x32; } - if (src0->type == GGML_TYPE_F32) { - return ctx->device->pipeline_conv2d_f32[shape]; - } else if (src0->type == GGML_TYPE_F16) { - return ctx->device->pipeline_conv2d_f16_f32[shape]; + if (op == GGML_OP_CONV_2D) { + if (src0->type == GGML_TYPE_F32) { + return ctx->device->pipeline_conv2d_f32[shape]; + } else if (src0->type == GGML_TYPE_F16) { + return ctx->device->pipeline_conv2d_f16_f32[shape]; + } + } else if (op == GGML_OP_CONV_TRANSPOSE_2D) { + if (src0->type == GGML_TYPE_F32) { + return ctx->device->pipeline_conv_transpose_2d_f32[shape]; + } else if (src0->type == GGML_TYPE_F16) { + return ctx->device->pipeline_conv_transpose_2d_f16_f32[shape]; + } } } return nullptr; @@ -8350,6 +8439,10 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co { elements = ggml_vk_get_conv_elements(dst); } break; + case GGML_OP_CONV_TRANSPOSE_2D: + { + elements = ggml_vk_get_conv_transpose_2d_elements(dst); + } break; case GGML_OP_ADD: case GGML_OP_SUB: case GGML_OP_DIV: @@ -9523,6 +9616,55 @@ static void ggml_vk_conv_2d(ggml_backend_vk_context * ctx, vk_context & subctx, ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_CONV_2D, std::move(p), dryrun); } +static void ggml_vk_conv_transpose_2d(ggml_backend_vk_context * ctx, vk_context & subctx, const ggml_tensor * src0, + const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) { + GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS + + GGML_ASSERT(nb00 == sizeof(float) || nb00 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb10 == sizeof(float)); + GGML_ASSERT(nb0 == sizeof(float)); + + vk_op_conv_transpose_2d_push_constants p{}; + p.Cout = static_cast(ne02); + p.Cin = static_cast(ne03); + p.N = static_cast(ne13); + + p.KW = static_cast(ne00); + p.KH = static_cast(ne01); + p.W = static_cast(ne10); + p.H = static_cast(ne11); + p.OW = static_cast(ne0); + p.OH = static_cast(ne1); + + p.s0 = static_cast(dst->op_params[0]); + p.s1 = static_cast(dst->op_params[0]); + p.p0 = 0; + p.p1 = 0; + p.d0 = 1; + p.d1 = 1; + + p.nb01 = static_cast(nb01 / nb00); + p.nb02 = static_cast(nb02 / nb00); + p.nb03 = static_cast(nb03 / nb00); + + p.nb11 = static_cast(nb11 / nb10); + p.nb12 = static_cast(nb12 / nb10); + p.nb13 = static_cast(nb13 / nb10); + + p.nb1 = static_cast(nb1 / 
nb0); + p.nb2 = static_cast(nb2 / nb0); + p.nb3 = static_cast(nb3 / nb0); + + GGML_ASSERT(ne02 == ne2); + GGML_ASSERT(ne03 == ne12); + + ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_CONV_TRANSPOSE_2D, std::move(p), dryrun); +} + static void ggml_vk_conv_2d_dw(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) { vk_op_conv2d_dw_push_constants p{}; p.ne = ggml_nelements(dst); @@ -10615,6 +10757,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr case GGML_OP_CONV_TRANSPOSE_1D: case GGML_OP_POOL_2D: case GGML_OP_CONV_2D: + case GGML_OP_CONV_TRANSPOSE_2D: case GGML_OP_CONV_2D_DW: case GGML_OP_RWKV_WKV6: case GGML_OP_RWKV_WKV7: @@ -10686,6 +10829,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr case GGML_OP_CONV_TRANSPOSE_1D: case GGML_OP_POOL_2D: case GGML_OP_CONV_2D: + case GGML_OP_CONV_TRANSPOSE_2D: case GGML_OP_CONV_2D_DW: case GGML_OP_LEAKY_RELU: case GGML_OP_OPT_STEP_SGD: @@ -10997,6 +11141,10 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr case GGML_OP_CONV_2D: ggml_vk_conv_2d(ctx, compute_ctx, src0, src1, node, dryrun); + break; + case GGML_OP_CONV_TRANSPOSE_2D: + ggml_vk_conv_transpose_2d(ctx, compute_ctx, src0, src1, node, dryrun); + break; case GGML_OP_CONV_2D_DW: ggml_vk_conv_2d_dw(ctx, compute_ctx, src0, src1, node, dryrun); @@ -11137,6 +11285,7 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph * case GGML_OP_CONV_TRANSPOSE_1D: case GGML_OP_POOL_2D: case GGML_OP_CONV_2D: + case GGML_OP_CONV_TRANSPOSE_2D: case GGML_OP_CONV_2D_DW: case GGML_OP_RWKV_WKV6: case GGML_OP_RWKV_WKV7: @@ -11794,10 +11943,10 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg ggml_vk_build_graph(ctx, cgraph, i, nullptr, 0, true, false, false, false); if (cgraph->nodes[i]->op == GGML_OP_MUL_MAT || cgraph->nodes[i]->op == GGML_OP_MUL_MAT_ID) { total_mat_mul_bytes += ggml_nbytes(cgraph->nodes[i]->src[0]); - } else if (cgraph->nodes[i]->op == GGML_OP_CONV_2D) { + } else if (cgraph->nodes[i]->op == GGML_OP_CONV_2D || cgraph->nodes[i]->op == GGML_OP_CONV_TRANSPOSE_2D) { // Return CRSxNPQxsizeof(*) to account as many bytes as mul_mat has in im2col->mul_mat mode. auto CRS_size = - cgraph->nodes[i]->src[0]->ne[0] * cgraph->nodes[i]->src[0]->ne[1] * cgraph->nodes[i]->src[0]->ne[2]; + cgraph->nodes[i]->src[0]->ne[0] * cgraph->nodes[i]->src[0]->ne[1] * cgraph->nodes[i]->src[1]->ne[2]; auto NPQ_size = cgraph->nodes[i]->ne[0] * cgraph->nodes[i]->ne[1] * cgraph->nodes[i]->ne[3]; total_mat_mul_bytes += NPQ_size * CRS_size * ggml_type_size(cgraph->nodes[i]->type); } @@ -12618,10 +12767,15 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm case GGML_OP_CONV_TRANSPOSE_1D: return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32; case GGML_OP_CONV_2D: + case GGML_OP_CONV_TRANSPOSE_2D: { // Op is disabled for Apple because it segfaults at pipeline create time on MoltenVK ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context; const vk_device& device = ggml_vk_get_device(ctx->device); + if (op->op == GGML_OP_CONV_TRANSPOSE_2D && + device->properties.limits.maxPushConstantsSize < sizeof(vk_op_conv_transpose_2d_push_constants)) { + return false; + } // Channel-contiguous format is not supported yet. 
return ((op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) && op->src[1]->type == GGML_TYPE_F32 && @@ -13240,6 +13394,9 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph * const int32_t d0 = tensor->op_params[4]; const int32_t d1 = tensor->op_params[5]; tensor_clone = ggml_conv_2d(ggml_ctx, src_clone[0], src_clone[1], s0, s1, p0, p1, d0, d1); + } else if (tensor->op == GGML_OP_CONV_TRANSPOSE_2D) { + const int32_t s = tensor->op_params[0]; + tensor_clone = ggml_conv_transpose_2d_p0(ggml_ctx, src_clone[0], src_clone[1], s); } else if (tensor->op == GGML_OP_LEAKY_RELU) { const float * op_params = (const float *)tensor->op_params; tensor_clone = ggml_leaky_relu(ggml_ctx, src_clone[0], op_params[0], false); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp b/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp index 86bafba4a..44a64ddc8 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp @@ -16,7 +16,7 @@ // shape notation: [dim(N), ..., dim(0)] -- stride(dim(j)) >= stride(dim(i)) if i > j layout(binding = 0) readonly buffer A { A_TYPE knl_data[]; -}; // src0 - kernel: [KW, KH, Cin, Cout] +}; // src0 - kernel: [KW, KH, Cin, Cout] for conv_2d, [KW, KH, Cout, Cin] for conv_transposed_2d layout(binding = 1) readonly buffer B { B_TYPE src_data[]; @@ -66,6 +66,10 @@ layout(push_constant) uniform parameter { uint32_t KWKHmp; uint32_t KWKHL; uint32_t OWmp; uint32_t OWL; uint32_t OWOHmp; uint32_t OWOHL; +#ifdef TRANSPOSE + uint32_t s0mp; uint32_t s0L; + uint32_t s1mp; uint32_t s1L; +#endif } p; @@ -225,7 +229,11 @@ void main() { uint32_t B_ly = r_offset + Ar; uint32_t B_lx = Ac; uint32_t K_idx = B_idx_K * BS_K + B_ly; /* Global K_idx (row index of A)*/ +#ifdef TRANSPOSE + uint32_t knl_idx = min(KW_idx_a + KH_idx_a * p.nb01 + K_idx * p.nb02 + Cin_idx_a * p.nb03, K * CRS - 1); +#else uint32_t knl_idx = min(KW_idx_a + KH_idx_a * p.nb01 + Cin_idx_a * p.nb02 + K_idx * p.nb03, K * CRS - 1); +#endif float val = knl_data[knl_idx]; if (K_idx >= K || CRS_idx_a >= CRS) { val = 0.0; @@ -267,12 +275,24 @@ void main() { KW_idx_b = CRS_remainder - KH_idx_b * p.KW; #endif +#ifdef TRANSPOSE + uint32_t H_idx_x_s1 = OH_idx - KH_idx_b * p.d1 + p.p1; + uint32_t W_idx_x_s0 = OW_idx - KW_idx_b * p.d0 + p.p0; + uint32_t H_idx = fastdiv(H_idx_x_s1, p.s1mp, p.s1L); + uint32_t W_idx = fastdiv(W_idx_x_s0, p.s0mp, p.s0L); +#else uint32_t H_idx = OH_idx * p.s1 + KH_idx_b * p.d1 - p.p1; uint32_t W_idx = OW_idx * p.s0 + KW_idx_b * p.d0 - p.p0; +#endif uint32_t src_idx = min(max(W_idx + H_idx * p.nb11 + Cin_idx_b * p.nb12 + N_idx * p.nb13, 0), p.Cin * p.N * p.W * p.H - 1); float val = src_data[src_idx]; - if (CRS_idx_b >= CRS || NPQ_idx >= NPQ || H_idx < 0 || H_idx >= p.H || W_idx < 0 || W_idx >= p.W) { + if (CRS_idx_b >= CRS || NPQ_idx >= NPQ + || H_idx >= p.H || W_idx >= p.W // Lower bound checks aren't necessary. 
(idx >= 0x80000000 for such case) +#ifdef TRANSPOSE + || (H_idx_x_s1 - H_idx * p.s1 != 0) || (W_idx_x_s0 - W_idx * p.s0 != 0) +#endif + ) { val = 0.0; } Bsh[B_ly * Bsh_stride + B_lx] = SHMEM_TYPE(val); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp index 86c873cc4..2531610e4 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp @@ -796,16 +796,26 @@ void process_shaders() { string_to_spv("opt_step_adamw_f32", "opt_step_adamw.comp", merge_maps(base_dict, {{"A_TYPE", "float"}})); string_to_spv("opt_step_sgd_f32", "opt_step_sgd.comp", merge_maps(base_dict, {{"A_TYPE", "float"}})); - string_to_spv("conv2d_f32_unroll", "conv2d_mm.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"USE_COLLECTIVES", "1"}, {"UNROLL", "[[unroll]]"}}); - string_to_spv("conv2d_f16_f32_unroll", "conv2d_mm.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"USE_COLLECTIVES", "1"}, {"UNROLL", "[[unroll]]"}}); - - string_to_spv("conv2d_f32", "conv2d_mm.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"USE_COLLECTIVES", "1"}, {"UNROLL", ""}}); - string_to_spv("conv2d_f16_f32", "conv2d_mm.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"USE_COLLECTIVES", "1"}, {"UNROLL", ""}}); - + for (auto transpose : {false, true}) { + for (auto unroll : {false, true}) { + for (auto a_f16 : {false, true}) { + std::map defines = { + {"A_TYPE", a_f16 ? "float16_t" : "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, + {"USE_COLLECTIVES", "1"}, {"UNROLL", unroll ? "[[unroll]]" : ""}, + }; + if (transpose) defines["TRANSPOSE"] = "1"; + std::string name = std::string(transpose ? "conv_transpose_2d": "conv2d") + + (a_f16 ? "_f16" : "") + "_f32"; + string_to_spv(name + (unroll ? "_unroll" : ""), "conv2d_mm.comp", defines); #if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT) - string_to_spv("conv2d_f32", "conv2d_mm.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"USE_COLLECTIVES", "1"}, {"UNROLL", "[[unroll]]"}, {"COOPMAT2", "1"}}, true, false, true); - string_to_spv("conv2d_f16_f32", "conv2d_mm.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"USE_COLLECTIVES", "1"}, {"UNROLL", "[[unroll]]"}, {"COOPMAT2", "1"}}, true, false, true); + if (unroll) { + defines["COOPMAT2"] = "1"; + string_to_spv(name, "conv2d_mm.comp", defines, true, false, true); + } #endif + } + } + } string_to_spv("conv2d_dw_whcn_f32", "conv2d_dw.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"WHCN", "1"}})); string_to_spv("conv2d_dw_cwhn_f32", "conv2d_dw.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"CWHN", "1"}})); diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index ef6594ea5..f11eecd8e 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -3969,6 +3969,10 @@ struct test_conv_transpose_2d : public test_case { return VARS_TO_STR3(ne_input, ne_kernel, stride); } + double max_nmse_err() override { + return 5e-4; // The default 1e-7 is too small for Vulkan. 
+ } + test_conv_transpose_2d(std::array ne_input = {10, 10, 3, 1}, // [input_width, input_height, input_channels, 1] std::array ne_kernel = {3, 3, 3, 1}, // [kernel_width, kernel_height, input_channels, 1] int stride = 1) From 05a2458121f9cdcc98ea6ffeeab6f23023136266 Mon Sep 17 00:00:00 2001 From: Xuan-Son Nguyen Date: Mon, 22 Sep 2025 15:10:58 +0700 Subject: [PATCH 06/12] codeowners : update ownership for @ngxson and @allozuar (#16128) --- CODEOWNERS | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index e14d37861..6ee005161 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -2,7 +2,10 @@ /ci/ @ggerganov /.devops/*.Dockerfile @ngxson -/tools/server/ @ngxson +/tools/server/* @ngxson # no subdir +/tools/server/webui/ @allozaur +/tools/mtmd/ @ngxson +/src/llama-chat.* @ngxson /ggml/src/ggml-cuda/fattn* @JohannesGaessler /ggml/src/ggml-cuda/mmq.* @JohannesGaessler /ggml/src/ggml-cuda/mmvq.* @JohannesGaessler From a71ae3ba7a857394103f12e30b387c48242c84f2 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 22 Sep 2025 11:12:09 +0300 Subject: [PATCH 07/12] ggml : add ggml_op_is_empty (#16122) * ggml : add ggml_op_is_empty * ggml : move to ggml-impl.h --- ggml/src/ggml-impl.h | 15 ++++++++++++++- ggml/src/ggml-metal/ggml-metal-common.cpp | 18 ++---------------- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h index 19a7adb2d..6e01a42ce 100644 --- a/ggml/src/ggml-impl.h +++ b/ggml/src/ggml-impl.h @@ -73,7 +73,7 @@ static inline int ggml_up(int n, int m) { return (n + m - 1) & ~(m - 1); } -// TODO: move to ggml.h? +// TODO: move to ggml.h? (won't be able to inline) static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) { if (a->type != b->type) { return false; @@ -89,6 +89,19 @@ static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml return true; } +static bool ggml_op_is_empty(enum ggml_op op) { + switch (op) { + case GGML_OP_NONE: + case GGML_OP_RESHAPE: + case GGML_OP_TRANSPOSE: + case GGML_OP_VIEW: + case GGML_OP_PERMUTE: + return true; + default: + return false; + } +} + // // logging // diff --git a/ggml/src/ggml-metal/ggml-metal-common.cpp b/ggml/src/ggml-metal/ggml-metal-common.cpp index 34d27b632..e61a5706d 100644 --- a/ggml/src/ggml-metal/ggml-metal-common.cpp +++ b/ggml/src/ggml-metal/ggml-metal-common.cpp @@ -184,20 +184,6 @@ bool ggml_mem_ranges_check(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) { return ggml_mem_ranges_check_dst(mrs, tensor); } -// TODO: move to ggml.h? 
-static bool is_empty(ggml_op op) { - switch (op) { - case GGML_OP_NONE: - case GGML_OP_RESHAPE: - case GGML_OP_TRANSPOSE: - case GGML_OP_VIEW: - case GGML_OP_PERMUTE: - return true; - default: - return false; - } -} - struct node_info { ggml_tensor * node; @@ -212,7 +198,7 @@ struct node_info { } bool is_empty() const { - return ::is_empty(node->op); + return ggml_op_is_empty(node->op); } void add_fused(ggml_tensor * t) { @@ -289,7 +275,7 @@ static std::vector ggml_metal_graph_optimize_reorder(const std::vector Date: Mon, 22 Sep 2025 11:12:37 +0300 Subject: [PATCH 08/12] ggml : extend ggml_can_fuse to work with non-sequential nodes (#16123) * ggml : extend ggml_can_fuse to work with non-sequential nodes in the graph * cont : fix wrong bounds check condition * cont : remove unnecessary overload --- ggml/src/ggml-impl.h | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h index 6e01a42ce..c2eaea22f 100644 --- a/ggml/src/ggml-impl.h +++ b/ggml/src/ggml-impl.h @@ -583,27 +583,27 @@ static inline bool ggml_node_has_n_uses(const struct ggml_cgraph * cgraph, int n return true; } -// Returns true if nodes [i, i+ops.size()) are the sequence of ggml_ops in ops[] +// Returns true if nodes with indices { node_idxs } are the sequence of ggml_ops in ops[] // and are fusable. Nodes are considered fusable according to this function if: // - all nodes except the last have only one use and are not views/outputs (see ggml_node_has_N_uses). // - all nodes except the last are a src of the following node. // - all nodes are the same shape. // TODO: Consider allowing GGML_OP_NONE nodes in between -static inline bool ggml_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, const enum ggml_op * ops, int num_ops) { - if (node_idx + num_ops > cgraph->n_nodes) { - return false; - } - +static inline bool ggml_can_fuse_ext(const struct ggml_cgraph * cgraph, const int * node_idxs, const enum ggml_op * ops, int num_ops) { for (int i = 0; i < num_ops; ++i) { - struct ggml_tensor * node = cgraph->nodes[node_idx + i]; + if (node_idxs[i] >= cgraph->n_nodes) { + return false; + } + + struct ggml_tensor * node = cgraph->nodes[node_idxs[i]]; if (node->op != ops[i]) { return false; } - if (i < num_ops - 1 && !ggml_node_has_n_uses(cgraph, node_idx + i, 1)) { + if (i < num_ops - 1 && !ggml_node_has_n_uses(cgraph, node_idxs[i], 1)) { return false; } if (i > 0) { - struct ggml_tensor * prev = cgraph->nodes[node_idx + i - 1]; + struct ggml_tensor * prev = cgraph->nodes[node_idxs[i - 1]]; if (node->src[0] != prev && node->src[1] != prev) { return false; } @@ -615,6 +615,22 @@ static inline bool ggml_can_fuse(const struct ggml_cgraph * cgraph, int node_idx return true; } +// same as above, for sequential indices starting at node_idx +static inline bool ggml_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, const enum ggml_op * ops, int num_ops) { + assert(num_ops < 32); + + if (node_idx + num_ops > cgraph->n_nodes) { + return false; + } + + int idxs[32]; + for (int i = 0; i < num_ops; ++i) { + idxs[i] = node_idx + i; + } + + return ggml_can_fuse_ext(cgraph, idxs, ops, num_ops); +} + #ifdef __cplusplus } #endif From d05affbab7d1364fa7ac06c03cd33f03235ae840 Mon Sep 17 00:00:00 2001 From: Haiyue Wang Date: Mon, 22 Sep 2025 16:48:42 +0800 Subject: [PATCH 09/12] common : remove unused local variables (#16140) These two local variables 'arg' and 'arg_prefix' have been overriden by: 1. for (const auto & arg : opt.args) 2. 
for (int i = 1; i < argc; i++) { const std::string arg_prefix = "--"; std::string arg = argv[i]; --- common/arg.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/common/arg.cpp b/common/arg.cpp index 54df2df87..612336870 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -1068,8 +1068,6 @@ static std::string get_all_kv_cache_types() { // static bool common_params_parse_ex(int argc, char ** argv, common_params_context & ctx_arg) { - std::string arg; - const std::string arg_prefix = "--"; common_params & params = ctx_arg.params; std::unordered_map arg_to_options; From c6db9a10278f40b4b8d3f6931728f2cce2356daa Mon Sep 17 00:00:00 2001 From: GideonSerf Date: Mon, 22 Sep 2025 10:49:58 +0200 Subject: [PATCH 10/12] embedding : fix typos in README (#16171) --- examples/embedding/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/embedding/README.md b/examples/embedding/README.md index 12b372bf1..3dd279d9f 100644 --- a/examples/embedding/README.md +++ b/examples/embedding/README.md @@ -43,8 +43,8 @@ The above command will output space-separated float values. | $"string"$ | | |--------------|-| | "\n" | (default) -| "<#embSep#>" | for exemple -| "<#sep#>" | other exemple +| "<#embSep#>" | for example +| "<#sep#>" | other example ## examples ### Unix-based systems (Linux, macOS, etc.): From 138c87ce8bd558b2cc134ada7316a3dad8eb67ac Mon Sep 17 00:00:00 2001 From: Quentin Bramas Date: Mon, 22 Sep 2025 10:53:13 +0200 Subject: [PATCH 11/12] webui : fix handling incomplete chunks (#16107) --- tools/server/webui/src/lib/services/chat.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/server/webui/src/lib/services/chat.ts b/tools/server/webui/src/lib/services/chat.ts index 7133bbab8..91573a866 100644 --- a/tools/server/webui/src/lib/services/chat.ts +++ b/tools/server/webui/src/lib/services/chat.ts @@ -264,12 +264,14 @@ export class ChatService { let lastTimings: ChatMessageTimings | undefined; try { + let chunk = ''; while (true) { const { done, value } = await reader.read(); if (done) break; - const chunk = decoder.decode(value, { stream: true }); + chunk += decoder.decode(value, { stream: true }); const lines = chunk.split('\n'); + chunk = lines.pop() || ''; // Save incomplete line for next read for (const line of lines) { if (line.startsWith('data: ')) { From 37a23c17bdc9c99c9c6ad41168e4ced3724b72cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrien=20Gallou=C3=ABt?= Date: Mon, 22 Sep 2025 14:13:51 +0200 Subject: [PATCH 12/12] common : enable `--offline` mode without curl support (#16137) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * common : use the json parser Signed-off-by: Adrien Gallouët * common : enable --offline mode without CURL support This change refactors the download logic to properly support offline mode even when the project is built without CURL. Without this commit, using `--offline` would give the following error: error: built without CURL, cannot download model from the internet even if all the files are already cached. 
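
The refactor below hoists the offline check into common_download_file_single(), so only the online path depends on CURL. A minimal sketch of the resulting dispatch, mirroring the new wrapper in the diff (the name download_file_sketch and the bare std::filesystem::exists check stand in for the full metadata handling):

#include <filesystem>
#include <string>

// Provided by the patch: CURL-backed download (or an error stub when built without CURL).
bool common_download_file_single_online(const std::string & url, const std::string & path,
                                        const std::string & bearer_token);

static bool download_file_sketch(const std::string & url, const std::string & path,
                                 const std::string & bearer_token, bool offline) {
    if (!offline) {
        // online: delegate to the network helper
        return common_download_file_single_online(url, path, bearer_token);
    }
    if (!std::filesystem::exists(path)) {
        // offline and not cached: report the missing file instead of a misleading CURL error
        return false;
    }
    // offline and cached: use the file as-is, no CURL required
    return true;
}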
Signed-off-by: Adrien Gallouët --------- Signed-off-by: Adrien Gallouët --- common/arg.cpp | 235 +++++++++++++++++++++++-------------------------- 1 file changed, 111 insertions(+), 124 deletions(-) diff --git a/common/arg.cpp b/common/arg.cpp index 612336870..2b5e509d1 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -36,9 +37,21 @@ #if defined(LLAMA_USE_CURL) #include #include -#include #endif +#ifdef __linux__ +#include +#elif defined(_WIN32) +# if !defined(PATH_MAX) +# define PATH_MAX MAX_PATH +# endif +#elif defined(_AIX) +#include +#else +#include +#endif +#define LLAMA_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083 + using json = nlohmann::ordered_json; std::initializer_list mmproj_examples = { @@ -208,19 +221,6 @@ bool common_has_curl() { return true; } -#ifdef __linux__ -#include -#elif defined(_WIN32) -# if !defined(PATH_MAX) -# define PATH_MAX MAX_PATH -# endif -#elif defined(_AIX) -#include -#else -#include -#endif -#define LLAMA_CURL_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083 - // // CURL utils // @@ -368,10 +368,9 @@ static bool common_download_head(CURL * curl, } // download one single file from remote URL to local path -static bool common_download_file_single(const std::string & url, - const std::string & path, - const std::string & bearer_token, - bool offline) { +static bool common_download_file_single_online(const std::string & url, + const std::string & path, + const std::string & bearer_token) { // If the file exists, check its JSON metadata companion file. std::string metadata_path = path + ".json"; static const int max_attempts = 3; @@ -384,10 +383,6 @@ static bool common_download_file_single(const std::string & url, // Check if the file already exists locally const auto file_exists = std::filesystem::exists(path); if (file_exists) { - if (offline) { - LOG_INF("%s: using cached file (offline mode): %s\n", __func__, path.c_str()); - return true; // skip verification/downloading - } // Try and read the JSON metadata file (note: stream autoclosed upon exiting this block). 
std::ifstream metadata_in(metadata_path); if (metadata_in.good()) { @@ -407,10 +402,6 @@ static bool common_download_file_single(const std::string & url, } // if we cannot open the metadata file, we assume that the downloaded file is not valid (etag and last-modified are left empty, so we will download it again) } else { - if (offline) { - LOG_ERR("%s: required file is not available in cache (offline mode): %s\n", __func__, path.c_str()); - return false; - } LOG_INF("%s: no previous model file found %s\n", __func__, path.c_str()); } @@ -530,6 +521,89 @@ static bool common_download_file_single(const std::string & url, return true; } +std::pair> common_remote_get_content(const std::string & url, const common_remote_params & params) { + curl_ptr curl(curl_easy_init(), &curl_easy_cleanup); + curl_slist_ptr http_headers; + std::vector res_buffer; + + curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str()); + curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 1L); + curl_easy_setopt(curl.get(), CURLOPT_FOLLOWLOCATION, 1L); + curl_easy_setopt(curl.get(), CURLOPT_VERBOSE, 1L); + typedef size_t(*CURLOPT_WRITEFUNCTION_PTR)(void * ptr, size_t size, size_t nmemb, void * data); + auto write_callback = [](void * ptr, size_t size, size_t nmemb, void * data) -> size_t { + auto data_vec = static_cast *>(data); + data_vec->insert(data_vec->end(), (char *)ptr, (char *)ptr + size * nmemb); + return size * nmemb; + }; + curl_easy_setopt(curl.get(), CURLOPT_WRITEFUNCTION, static_cast(write_callback)); + curl_easy_setopt(curl.get(), CURLOPT_WRITEDATA, &res_buffer); +#if defined(_WIN32) + curl_easy_setopt(curl.get(), CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA); +#endif + if (params.timeout > 0) { + curl_easy_setopt(curl.get(), CURLOPT_TIMEOUT, params.timeout); + } + if (params.max_size > 0) { + curl_easy_setopt(curl.get(), CURLOPT_MAXFILESIZE, params.max_size); + } + http_headers.ptr = curl_slist_append(http_headers.ptr, "User-Agent: llama-cpp"); + for (const auto & header : params.headers) { + http_headers.ptr = curl_slist_append(http_headers.ptr, header.c_str()); + } + curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr); + + CURLcode res = curl_easy_perform(curl.get()); + + if (res != CURLE_OK) { + std::string error_msg = curl_easy_strerror(res); + throw std::runtime_error("error: cannot make GET request: " + error_msg); + } + + long res_code; + curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &res_code); + + return { res_code, std::move(res_buffer) }; +} + +#else + +bool common_has_curl() { + return false; +} + +static bool common_download_file_single_online(const std::string &, const std::string &, const std::string &) { + LOG_ERR("error: built without CURL, cannot download model from internet\n"); + return false; +} + +std::pair> common_remote_get_content(const std::string & url, const common_remote_params &) { + if (!url.empty()) { + throw std::runtime_error("error: built without CURL, cannot download model from the internet"); + } + + return {}; +} + +#endif // LLAMA_USE_CURL + +static bool common_download_file_single(const std::string & url, + const std::string & path, + const std::string & bearer_token, + bool offline) { + if (!offline) { + return common_download_file_single_online(url, path, bearer_token); + } + + if (!std::filesystem::exists(path)) { + LOG_ERR("%s: required file is not available in cache (offline mode): %s\n", __func__, path.c_str()); + return false; + } + + LOG_INF("%s: using cached file (offline mode): %s\n", __func__, path.c_str()); + return true; +} + // download 
multiple files from remote URLs to local paths // the input is a vector of pairs static bool common_download_file_multiple(const std::vector> & urls, const std::string & bearer_token, bool offline) { @@ -588,7 +662,7 @@ static bool common_download_model( if (n_split > 1) { char split_prefix[PATH_MAX] = {0}; - char split_url_prefix[LLAMA_CURL_MAX_URL_LENGTH] = {0}; + char split_url_prefix[LLAMA_MAX_URL_LENGTH] = {0}; // Verify the first split file format // and extract split URL and PATH prefixes @@ -609,7 +683,7 @@ static bool common_download_model( char split_path[PATH_MAX] = {0}; llama_split_path(split_path, sizeof(split_path), split_prefix, idx, n_split); - char split_url[LLAMA_CURL_MAX_URL_LENGTH] = {0}; + char split_url[LLAMA_MAX_URL_LENGTH] = {0}; llama_split_path(split_url, sizeof(split_url), split_url_prefix, idx, n_split); if (std::string(split_path) == model.path) { @@ -626,50 +700,6 @@ static bool common_download_model( return true; } -std::pair> common_remote_get_content(const std::string & url, const common_remote_params & params) { - curl_ptr curl(curl_easy_init(), &curl_easy_cleanup); - curl_slist_ptr http_headers; - std::vector res_buffer; - - curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str()); - curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 1L); - curl_easy_setopt(curl.get(), CURLOPT_FOLLOWLOCATION, 1L); - typedef size_t(*CURLOPT_WRITEFUNCTION_PTR)(void * ptr, size_t size, size_t nmemb, void * data); - auto write_callback = [](void * ptr, size_t size, size_t nmemb, void * data) -> size_t { - auto data_vec = static_cast *>(data); - data_vec->insert(data_vec->end(), (char *)ptr, (char *)ptr + size * nmemb); - return size * nmemb; - }; - curl_easy_setopt(curl.get(), CURLOPT_WRITEFUNCTION, static_cast(write_callback)); - curl_easy_setopt(curl.get(), CURLOPT_WRITEDATA, &res_buffer); -#if defined(_WIN32) - curl_easy_setopt(curl.get(), CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA); -#endif - if (params.timeout > 0) { - curl_easy_setopt(curl.get(), CURLOPT_TIMEOUT, params.timeout); - } - if (params.max_size > 0) { - curl_easy_setopt(curl.get(), CURLOPT_MAXFILESIZE, params.max_size); - } - http_headers.ptr = curl_slist_append(http_headers.ptr, "User-Agent: llama-cpp"); - for (const auto & header : params.headers) { - http_headers.ptr = curl_slist_append(http_headers.ptr, header.c_str()); - } - curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr); - - CURLcode res = curl_easy_perform(curl.get()); - - if (res != CURLE_OK) { - std::string error_msg = curl_easy_strerror(res); - throw std::runtime_error("error: cannot make GET request: " + error_msg); - } - - long res_code; - curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &res_code); - - return { res_code, std::move(res_buffer) }; -} - /** * Allow getting the HF file from the HF repo with tag (like ollama), for example: * - bartowski/Llama-3.2-3B-Instruct-GGUF:q4 @@ -736,21 +766,17 @@ static struct common_hf_file_res common_get_hf_file(const std::string & hf_repo_ std::string mmprojFile; if (res_code == 200 || res_code == 304) { - // extract ggufFile.rfilename in json, using regex - { - std::regex pattern("\"ggufFile\"[\\s\\S]*?\"rfilename\"\\s*:\\s*\"([^\"]+)\""); - std::smatch match; - if (std::regex_search(res_str, match, pattern)) { - ggufFile = match[1].str(); + try { + auto j = json::parse(res_str); + + if (j.contains("ggufFile") && j["ggufFile"].contains("rfilename")) { + ggufFile = j["ggufFile"]["rfilename"].get(); } - } - // extract mmprojFile.rfilename in json, using regex - { - std::regex 
pattern("\"mmprojFile\"[\\s\\S]*?\"rfilename\"\\s*:\\s*\"([^\"]+)\""); - std::smatch match; - if (std::regex_search(res_str, match, pattern)) { - mmprojFile = match[1].str(); + if (j.contains("mmprojFile") && j["mmprojFile"].contains("rfilename")) { + mmprojFile = j["mmprojFile"]["rfilename"].get(); } + } catch (const std::exception & e) { + throw std::runtime_error(std::string("error parsing manifest JSON: ") + e.what()); } if (!use_cache) { // if not using cached response, update the cache file @@ -770,45 +796,6 @@ static struct common_hf_file_res common_get_hf_file(const std::string & hf_repo_ return { hf_repo, ggufFile, mmprojFile }; } -#else - -bool common_has_curl() { - return false; -} - -static bool common_download_file_single(const std::string &, const std::string &, const std::string &, bool) { - LOG_ERR("error: built without CURL, cannot download model from internet\n"); - return false; -} - -static bool common_download_file_multiple(const std::vector> &, const std::string &, bool) { - LOG_ERR("error: built without CURL, cannot download model from the internet\n"); - return false; -} - -static bool common_download_model( - const common_params_model &, - const std::string &, - bool) { - LOG_ERR("error: built without CURL, cannot download model from the internet\n"); - return false; -} - -static struct common_hf_file_res common_get_hf_file(const std::string &, const std::string &, bool) { - LOG_ERR("error: built without CURL, cannot download model from the internet\n"); - return {}; -} - -std::pair> common_remote_get_content(const std::string & url, const common_remote_params &) { - if (!url.empty()) { - throw std::runtime_error("error: built without CURL, cannot download model from the internet"); - } - - return {}; -} - -#endif // LLAMA_USE_CURL - // // Docker registry functions //