From eddfb438502bd5d1014d63a812e9b6d03d326f8c Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Sat, 22 Mar 2025 03:40:11 -0500 Subject: [PATCH 01/19] vulkan: Optimize mul_mat_vec p021 and nc shaders (#12505) * tests: add mul_mat perf/functional tests for p021/nc vulkan shaders * vulkan: Optimize mul_mat_vec p021 and nc shaders. These shaders are used in attention calculations, and when the KV cache grows large they start to dominate the run time. For the nc shader (which is called with large 'k' dimension), use unrolling and vector loads. For the p021 shader (which is called with large 'm' and small 'k' dimensions), take advantage of grouped query attention to reuse loads from the A matrix for the whole group, and reduce the number of workgroups (too much overhead from tiny dispatches). Using subgroupAdd in the p021 shader also helps, use that conditionally. --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 36 ++++- .../vulkan-shaders/mul_mat_vec_nc.comp | 72 +++++++-- .../vulkan-shaders/mul_mat_vec_p021.comp | 137 ++++++++++++++---- .../vulkan-shaders/vulkan-shaders-gen.cpp | 5 +- tests/test-backend-ops.cpp | 31 +++- 5 files changed, 228 insertions(+), 53 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 649504566..37fa8eec5 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -149,6 +149,7 @@ class vk_perf_logger; static void ggml_vk_destroy_buffer(vk_buffer& buf); static constexpr uint32_t mul_mat_vec_max_cols = 8; +static constexpr uint32_t p021_max_gqa_ratio = 8; enum vk_device_architecture { OTHER, @@ -231,6 +232,7 @@ struct vk_device_struct { bool uma; bool prefer_host_memory; bool float_controls_rte_fp16; + bool subgroup_add; bool subgroup_size_control; uint32_t subgroup_min_size; @@ -277,7 +279,7 @@ struct vk_device_struct { vk_pipeline pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_COUNT][mul_mat_vec_max_cols]; vk_pipeline pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_COUNT]; - vk_pipeline pipeline_mul_mat_vec_p021_f16_f32; + vk_pipeline pipeline_mul_mat_vec_p021_f16_f32[p021_max_gqa_ratio]; vk_pipeline pipeline_mul_mat_vec_nc_f16_f32; vk_pipeline pipeline_get_rows[GGML_TYPE_COUNT]; vk_pipeline pipeline_get_rows_f32[GGML_TYPE_COUNT]; @@ -2265,7 +2267,13 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_matmul_split_k_reduce, "split_k_reduce", split_k_reduce_len, split_k_reduce_data, "main", 2, 2 * sizeof(uint32_t), {256 * 4, 1, 1}, {}, 1); - ggml_vk_create_pipeline(device, device->pipeline_mul_mat_vec_p021_f16_f32, "mul_mat_vec_p021_f16_f32", mul_mat_vec_p021_f16_f32_len, mul_mat_vec_p021_f16_f32_data, "main", 3, 6 * sizeof(uint32_t), {1, 1, 1}, {}, 1); + for (uint32_t i = 0; i < p021_max_gqa_ratio; ++i) { + if (device->subgroup_add && device->subgroup_require_full_support) { + ggml_vk_create_pipeline(device, device->pipeline_mul_mat_vec_p021_f16_f32[i], "mul_mat_vec_p021_f16_f32"+std::to_string(i+1), mul_mat_vec_p021_f16_f32_subgroup_add_len, mul_mat_vec_p021_f16_f32_subgroup_add_data, "main", 3, 6 * sizeof(uint32_t), {1, 1, 1}, {device->subgroup_size, i + 1}, 1, true, true); + } else { + ggml_vk_create_pipeline(device, device->pipeline_mul_mat_vec_p021_f16_f32[i], "mul_mat_vec_p021_f16_f32"+std::to_string(i+1), mul_mat_vec_p021_f16_f32_len, mul_mat_vec_p021_f16_f32_data, "main", 3, 6 * sizeof(uint32_t), {1, 1, 1}, {device->subgroup_size, i + 1}, 1, true); + } + } ggml_vk_create_pipeline(device, 
device->pipeline_mul_mat_vec_nc_f16_f32, "mul_mat_vec_nc_f16_f32", mul_mat_vec_nc_f16_f32_len, mul_mat_vec_nc_f16_f32_data, "main", 3, 7 * sizeof(uint32_t), {1, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_norm_f32, "norm_f32", norm_f32_len, norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); @@ -2479,13 +2487,15 @@ static vk_device ggml_vk_get_device(size_t idx) { vk::PhysicalDeviceDriverProperties driver_props; vk::PhysicalDeviceShaderSMBuiltinsPropertiesNV sm_props; vk::PhysicalDeviceShaderCoreProperties2AMD amd_shader_core_properties2_props; + vk::PhysicalDeviceVulkan11Properties vk11_props; vk::PhysicalDeviceVulkan12Properties vk12_props; vk::PhysicalDeviceSubgroupSizeControlPropertiesEXT subgroup_size_control_props; props2.pNext = &props3; props3.pNext = &subgroup_props; subgroup_props.pNext = &driver_props; - driver_props.pNext = &vk12_props; + driver_props.pNext = &vk11_props; + vk11_props.pNext = &vk12_props; VkBaseOutStructure * last_struct = (VkBaseOutStructure *)&vk12_props; @@ -2549,6 +2559,9 @@ static vk_device ggml_vk_get_device(size_t idx) { } device->float_controls_rte_fp16 = vk12_props.shaderRoundingModeRTEFloat16; + device->subgroup_add = (vk11_props.subgroupSupportedStages & vk::ShaderStageFlagBits::eCompute) && + (vk11_props.subgroupSupportedOperations & vk::SubgroupFeatureFlagBits::eArithmetic); + const bool force_disable_f16 = getenv("GGML_VK_DISABLE_F16") != nullptr; device->fp16 = !force_disable_f16 && fp16_storage && fp16_compute; @@ -4635,9 +4648,15 @@ static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_c const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type); const uint64_t d_sz = sizeof(float) * d_ne; + // With grouped query attention there are > 1 Q matrices per K, V matrix. 
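+    // Illustrative note (not part of the original change): with, for example, 8 K/V
+    // heads (ne02) and 32 Q heads (ne12), gqa_ratio is 4, so each value loaded from A
+    // is reused across the 4 corresponding channels of B and only ne12 / 4 workgroups
+    // are dispatched in the z dimension.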
+ uint32_t gqa_ratio = (uint32_t)ne12 / (uint32_t)ne02; + if (gqa_ratio > 8 || gqa_ratio == 0 || ne12 != ne02 * gqa_ratio) { + gqa_ratio = 1; + } + if (dryrun) { // Request descriptor sets - ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_mul_mat_vec_p021_f16_f32, 1); + ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_mul_mat_vec_p021_f16_f32[gqa_ratio - 1], 1); return; } @@ -4661,8 +4680,15 @@ static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_c // compute const std::array pc = { (uint32_t)ne00, (uint32_t)ne01, (uint32_t)ne02, (uint32_t)ne12, (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)) }; + + uint32_t workgroups_z = (uint32_t)ne12; + // When gqa_ratio > 1, each invocation does multiple rows and we can launch fewer workgroups + if (gqa_ratio > 1) { + workgroups_z /= gqa_ratio; + } + ggml_vk_sync_buffers(subctx); - ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32, { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz }, vk_subbuffer{ d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset }, vk_subbuffer{ d_D, d_buffer_offset, d_sz + d_shader_offset } }, 6 * sizeof(uint32_t), &pc, { 1, (uint32_t)ne01, (uint32_t)ne12 }); + ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32[gqa_ratio - 1], { vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz }, vk_subbuffer{ d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset }, vk_subbuffer{ d_D, d_buffer_offset, d_sz + d_shader_offset } }, 6 * sizeof(uint32_t), &pc, { 1, (uint32_t)ne01, workgroups_z }); } static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp index 1cc4996d3..48376637f 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp @@ -12,6 +12,9 @@ layout (binding = 0) readonly buffer A {A_TYPE data_a[];}; layout (binding = 1) readonly buffer B {B_TYPE data_b[];}; layout (binding = 2) writeonly buffer D {D_TYPE dst[];}; +layout (binding = 0) readonly buffer AV4 {A_TYPE_VEC4 data_a_v4[];}; +layout (binding = 1) readonly buffer BV4 {B_TYPE_VEC4 data_b_v4[];}; + layout (push_constant) uniform parameter { uint ncols_x; @@ -37,25 +40,66 @@ void main() { const uint idst = channel*nrows_dst + row_dst; - tmp[tid] = 0.0f; + FLOAT_TYPE temp = 0.0f; - for (uint col_x0 = 0; col_x0 < p.ncols_x; col_x0 += BLOCK_SIZE) { - const uint col_x = col_x0 + tid; + // Detect alignment for vector loads + bool is_aligned = (p.ncols_x % 4) == 0 && (p.row_stride_x % 4) == 0 && (p.channel_stride_x % 4) == 0; - if (col_x >= p.ncols_x) { - break; + for (uint col_x0 = 0; col_x0 < p.ncols_x;) { + + // Unroll 2x and do vec4 loads if aligned + const uint unroll_count = 2; + if (col_x0 + unroll_count * 4 * BLOCK_SIZE <= p.ncols_x && is_aligned) { + [[unroll]] for (uint i = 0; i < unroll_count; ++i) { + const uint col_x = col_x0 + 4*tid; + + const uint row_y = col_x; + + const uint ix = channel_x*p.channel_stride_x + row_x*p.row_stride_x + col_x; + const uint iy = channel*nrows_y + row_y; + + const vec4 av4 = vec4(data_a_v4[ix / 4]); + const vec4 bv4 = vec4(data_b_v4[iy / 4]); + + temp += dot(av4, bv4); + + col_x0 += 4*BLOCK_SIZE; + } + // do vec4 loads if aligned + } 
else if (col_x0 + 4*BLOCK_SIZE <= p.ncols_x && is_aligned) { + const uint col_x = col_x0 + 4*tid; + + const uint row_y = col_x; + + const uint ix = channel_x*p.channel_stride_x + row_x*p.row_stride_x + col_x; + const uint iy = channel*nrows_y + row_y; + + const vec4 av4 = vec4(data_a_v4[ix / 4]); + const vec4 bv4 = vec4(data_b_v4[iy / 4]); + + temp += dot(av4, bv4); + + col_x0 += 4*BLOCK_SIZE; + } else { + const uint col_x = col_x0 + tid; + if (col_x >= p.ncols_x) { + break; + } + + const uint row_y = col_x; + + const uint ix = channel_x*p.channel_stride_x + row_x*p.row_stride_x + col_x; + const uint iy = channel*nrows_y + row_y; + + const FLOAT_TYPE xi = FLOAT_TYPE(data_a[ix]); + + temp = fma(xi, FLOAT_TYPE(data_b[iy]), temp); + col_x0 += BLOCK_SIZE; } - - const uint row_y = col_x; - - const uint ix = channel_x*p.channel_stride_x + row_x*p.row_stride_x + col_x; - const uint iy = channel*nrows_y + row_y; - - const FLOAT_TYPE xi = FLOAT_TYPE(data_a[ix]); - - tmp[tid] = fma(xi, FLOAT_TYPE(data_b[iy]), tmp[tid]); } + tmp[tid] = temp; + // sum up partial sums and write back result barrier(); [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp index 9b443807d..7aa070eeb 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp @@ -2,16 +2,25 @@ #extension GL_EXT_control_flow_attributes : enable #extension GL_EXT_shader_16bit_storage : require +#if USE_SUBGROUP_ADD +#extension GL_KHR_shader_subgroup_arithmetic : enable +#endif -#define BLOCK_SIZE 32 #define FLOAT_TYPE float -layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in; +layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; layout (binding = 0) readonly buffer A {A_TYPE data_a[];}; layout (binding = 1) readonly buffer B {B_TYPE data_b[];}; layout (binding = 2) writeonly buffer D {D_TYPE dst[];}; +layout (binding = 0) readonly buffer AV4 {A_TYPE_VEC4 data_a_v4[];}; +layout (binding = 1) readonly buffer BV4 {B_TYPE_VEC4 data_b_v4[];}; + +layout(constant_id = 0) const int BLOCK_SIZE = 32; +// gqa_ratio is in the range [1,8] +layout(constant_id = 1) const uint gqa_ratio = 1; + layout (push_constant) uniform parameter { uint ncols_x; @@ -22,52 +31,124 @@ layout (push_constant) uniform parameter uint d_offset; } p; -shared FLOAT_TYPE tmp[BLOCK_SIZE]; +#if !USE_SUBGROUP_ADD +shared FLOAT_TYPE tmp[8][BLOCK_SIZE]; +#endif void main() { const uint tid = gl_LocalInvocationID.x; const uint row_x = gl_GlobalInvocationID.y; - const uint channel = gl_GlobalInvocationID.z; - const uint channel_x = channel / (p.nchannels_y / p.nchannels_x); + + uint channel, channel_x; + + // When gqa_ratio > 1, each invocation does multiple rows. + // The row in the A matrix is starting from channel / gqa_ratio and the + // rows in the B matrix are [channel, channel+gqa_ratio). + // When gpa_ratio is 1, each invocation does one row. 
+ if (gqa_ratio > 1) { + channel_x = gl_GlobalInvocationID.z; + channel = channel_x * gqa_ratio; + } else { + channel = gl_GlobalInvocationID.z; + channel_x = channel / (p.nchannels_y / p.nchannels_x);; + } const uint nrows_y = p.ncols_x; const uint nrows_dst = p.nrows_x; const uint row_dst = row_x; - tmp[tid] = FLOAT_TYPE(0.0f); - - for (uint col_x0 = 0; col_x0 < p.ncols_x; col_x0 += BLOCK_SIZE) { - const uint col_x = col_x0 + tid; - - if (col_x >= p.ncols_x) { - break; - } - - // x is transposed and permuted - const uint ix = row_x*p.nchannels_x*p.ncols_x + channel_x*p.ncols_x + col_x; - const FLOAT_TYPE xi = FLOAT_TYPE(data_a[ix]); - - const uint row_y = col_x; - - // y is not transposed but permuted - const uint iy = channel*nrows_y + row_y; - - tmp[tid] = fma(xi, FLOAT_TYPE(data_b[iy]), tmp[tid]); + FLOAT_TYPE temp[8]; + [[unroll]] for (uint i = 0; i < 8; ++i) { + temp[i] = FLOAT_TYPE(0.0f); } - // dst is not transposed and not permuted - const uint idst = channel*nrows_dst + row_dst; + // Detect alignment for vector loads + bool is_aligned = (p.ncols_x % 4) == 0 && (p.nchannels_x % 4) == 0 && (nrows_y % 4) == 0; + for (uint col_x0 = 0; col_x0 < p.ncols_x; col_x0 += BLOCK_SIZE) { + + // Use vec4 loads if aligned + if (col_x0 + 4*BLOCK_SIZE <= p.ncols_x && is_aligned) { + + uint col_x = col_x0 + 4*tid; + const uint row_y = col_x; + + // x is transposed and permuted + const uint ix = row_x*p.nchannels_x*p.ncols_x + channel_x*p.ncols_x + col_x; + const vec4 av4 = vec4(data_a_v4[ix / 4]); + + [[unroll]] for (uint c = 0; c < gqa_ratio; ++c) { + // y is not transposed but permuted + const uint iy = (channel + c)*nrows_y + row_y; + + vec4 bv4 = data_b_v4[iy / 4]; + temp[c] += dot(av4, bv4); + } + + col_x0 += 3*BLOCK_SIZE; + } else { + const uint col_x = col_x0 + tid; + + if (col_x >= p.ncols_x) { + break; + } + + // x is transposed and permuted + const uint ix = row_x*p.nchannels_x*p.ncols_x + channel_x*p.ncols_x + col_x; + const FLOAT_TYPE xi = FLOAT_TYPE(data_a[ix]); + + const uint row_y = col_x; + + [[unroll]] for (uint c = 0; c < gqa_ratio; ++c) { + // y is not transposed but permuted + const uint iy = (channel + c)*nrows_y + row_y; + + temp[c] = fma(xi, FLOAT_TYPE(data_b[iy]), temp[c]); + } + } + } + +#if USE_SUBGROUP_ADD + // reduce vec4 at a time + vec4 t = vec4(temp[0], temp[1], temp[2], temp[3]); + t = subgroupAdd(t); + temp[0] = t[0]; + temp[1] = t[1]; + temp[2] = t[2]; + temp[3] = t[3]; + if (gqa_ratio > 4) { + t = vec4(temp[4], temp[5], temp[6], temp[7]); + t = subgroupAdd(t); + temp[4] = t[0]; + temp[5] = t[1]; + temp[6] = t[2]; + temp[7] = t[3]; + } +#else + [[unroll]] for (uint c = 0; c < gqa_ratio; ++c) { + tmp[c][tid] = temp[c]; + } // sum up partial sums and write back result barrier(); [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) { if (tid < s) { - tmp[tid] += tmp[tid + s]; + [[unroll]] for (uint c = 0; c < gqa_ratio; ++c) { + temp[c] += tmp[c][tid + s]; + tmp[c][tid] = temp[c]; + } } barrier(); } + [[unroll]] for (uint c = 0; c < gqa_ratio; ++c) { + temp[c] = tmp[c][tid]; + } +#endif if (tid == 0) { - dst[idst] = tmp[0]; + [[unroll]] for (uint c = 0; c < gqa_ratio; ++c) { + // dst is not transposed and not permuted + const uint idst = (channel + c)*nrows_dst + row_dst; + dst[idst] = temp[c]; + } } } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp index 519e610e3..1edb8267f 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +++ 
b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp @@ -426,8 +426,9 @@ void process_shaders() { } } - string_to_spv("mul_mat_vec_p021_f16_f32", "mul_mat_vec_p021.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}); - string_to_spv("mul_mat_vec_nc_f16_f32", "mul_mat_vec_nc.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}); + string_to_spv("mul_mat_vec_p021_f16_f32_subgroup_add", "mul_mat_vec_p021.comp", {{"A_TYPE", "float16_t"}, {"A_TYPE_VEC4", "f16vec4"}, {"B_TYPE", "float"}, {"B_TYPE_VEC4", "vec4"}, {"D_TYPE", "float"}, {"USE_SUBGROUP_ADD", "1"}}); + string_to_spv("mul_mat_vec_p021_f16_f32", "mul_mat_vec_p021.comp", {{"A_TYPE", "float16_t"}, {"A_TYPE_VEC4", "f16vec4"}, {"B_TYPE", "float"}, {"B_TYPE_VEC4", "vec4"}, {"D_TYPE", "float"}}); + string_to_spv("mul_mat_vec_nc_f16_f32", "mul_mat_vec_nc.comp", {{"A_TYPE", "float16_t"}, {"A_TYPE_VEC4", "f16vec4"}, {"B_TYPE", "float"}, {"B_TYPE_VEC4", "vec4"}, {"D_TYPE", "float"}}); // Norms string_to_spv("norm_f32", "norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}})); diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index d48cd2172..9d7847d21 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -1964,9 +1964,10 @@ struct test_mul_mat : public test_case { const std::array bs; // dims 3 and 4 const std::array nr; // repeat in dims 3 and 4 const std::array per; // permutation of dimensions + const bool v; // whether a is a non-contiguous view std::string vars() override { - return VARS_TO_STR8(type_a, type_b, m, n, k, bs, nr, per); + return VARS_TO_STR9(type_a, type_b, m, n, k, bs, nr, per, v); } double max_nmse_err() override { @@ -1986,8 +1987,9 @@ struct test_mul_mat : public test_case { int64_t m = 32, int64_t n = 32, int64_t k = 32, std::array bs = {10, 10}, std::array nr = {2, 2}, - std::array per = {0, 1, 2, 3}) - : type_a(type_a), type_b(type_b), m(m), n(n), k(k), bs(bs), nr(nr), per(per) {} + std::array per = {0, 1, 2, 3}, + bool v = false) + : type_a(type_a), type_b(type_b), m(m), n(n), k(k), bs(bs), nr(nr), per(per), v(v) {} ggml_tensor * build_graph(ggml_context * ctx) override { // C^T = A * B^T: (k, m) * (k, n) => (m, n) @@ -1997,6 +1999,7 @@ struct test_mul_mat : public test_case { const int npermuted = (per[0] != 0) + (per[1] != 1) + (per[2] != 2) + (per[3] != 3); if (npermuted > 0) { GGML_ASSERT(npermuted == 2); + GGML_ASSERT(!v); // not handled GGML_ASSERT(!ggml_is_quantized(type_a) || per[0] == 0); GGML_ASSERT(!ggml_is_quantized(type_b) || per[0] == 0); @@ -2020,7 +2023,13 @@ struct test_mul_mat : public test_case { ggml_set_name(a, "a_permuted"); ggml_set_name(b, "b_permuted"); } else { - a = ggml_new_tensor_4d(ctx, type_a, k, m, bs[0], bs[1]); + + if (v) { + a = ggml_new_tensor_4d(ctx, type_a, k*2, m, bs[0], bs[1]); + a = ggml_view_4d(ctx, a, k, m, bs[0], bs[1], a->nb[1], a->nb[2], a->nb[3], 0); + } else { + a = ggml_new_tensor_4d(ctx, type_a, k, m, bs[0], bs[1]); + } b = ggml_new_tensor_4d(ctx, type_b, k, n, bs[0]*nr[0], bs[1]*nr[1]); if (!ggml_is_quantized(type_a)) { if (bs[1] == 1 && nr[1] == 1) { @@ -4176,6 +4185,17 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 64, 45, 128, { 8, 1}, {4, 1})); test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128, 45, 64, { 8, 1}, {4, 1})); + for (auto bs : {1,2,4,8}) { + for (auto nr : {1,4}) { + for (uint32_t m = 0; m < 2; ++m) { + for (uint32_t k = 0; k < 2; ++k) { + 
test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 1056 + m, 1, 128 + k, {bs, 1}, {nr, 1}, {0, 2, 1, 3})); + test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128 + m, 1, 1056 + k, {bs, 1}, {nr, 1}, {0, 1, 2, 3}, true)); + } + } + } + } + // sycl backend will limit task global_range < MAX_INT // test case for f16-type-convert-to-fp32 kernel with large k under fp32 compute dtype (occurs in stable-diffusion) // however this case needs to alloc more memory which may fail in some devices (Intel Arc770, etc.) @@ -4444,6 +4464,9 @@ static std::vector> make_test_cases_perf() { test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 10, 1, 1})); test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32000, 512, 1, 1})); + test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 16416, 1, 128, {8, 1}, {4, 1}, {0, 2, 1, 3})); + test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128, 1, 16416, {8, 1}, {4, 1}, {0, 1, 2, 3}, true)); + for (int bs : {1, 2, 3, 4, 5, 8, 512}) { for (ggml_type type_a : all_types) { for (ggml_type type_b : {GGML_TYPE_F32}) { From fac63a3d786b2a0f97876c30add02cb525a9648e Mon Sep 17 00:00:00 2001 From: R0CKSTAR Date: Sat, 22 Mar 2025 17:11:37 +0800 Subject: [PATCH 02/19] musa: refine compute capability (#12493) * musa: refine compute capability Signed-off-by: Xiaodong Ye * Address review comments Signed-off-by: Xiaodong Ye --------- Signed-off-by: Xiaodong Ye --- ggml/src/ggml-cuda/common.cuh | 48 ++++++++++++++++++++------------- ggml/src/ggml-cuda/fattn.cu | 2 +- ggml/src/ggml-cuda/ggml-cuda.cu | 10 +++---- ggml/src/ggml-cuda/mmq.cu | 4 +-- ggml/src/ggml-cuda/mmq.cuh | 12 ++++----- 5 files changed, 44 insertions(+), 32 deletions(-) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index e78205e5d..6b5cd32a4 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -41,14 +41,17 @@ #define CUDART_HMAX 11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed) #define CUDART_HMASK 12000 // CUDA 12.0, min. ver. 
for half2 -> uint mask comparisons -#define GGML_CUDA_CC_PASCAL 600 -#define GGML_CUDA_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products -#define GGML_CUDA_CC_VOLTA 700 -#define GGML_CUDA_CC_TURING 750 -#define GGML_CUDA_CC_AMPERE 800 -#define GGML_CUDA_CC_ADA_LOVELACE 890 -#define GGML_CUDA_CC_OFFSET_AMD 0x1000000 +#define GGML_CUDA_CC_PASCAL 600 +#define GGML_CUDA_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products +#define GGML_CUDA_CC_VOLTA 700 +#define GGML_CUDA_CC_TURING 750 +#define GGML_CUDA_CC_AMPERE 800 +#define GGML_CUDA_CC_ADA_LOVELACE 890 +#define GGML_CUDA_CC_OFFSET_AMD 0x1000000 +#define GGML_CUDA_CC_OFFSET_MTHREADS 0x0100000 +#define GGML_CUDA_CC_IS_NVIDIA(cc) (cc < GGML_CUDA_CC_OFFSET_MTHREADS) +// AMD // GCN/CNDA, wave size is 64 #define GGML_CUDA_CC_GCN4 (GGML_CUDA_CC_OFFSET_AMD + 0x803) // Tonga, Fiji, Polaris, minimum for fast fp16 #define GGML_CUDA_CC_VEGA (GGML_CUDA_CC_OFFSET_AMD + 0x900) // Vega56/64, minimum for fp16 dual issue @@ -70,8 +73,17 @@ #define GGML_CUDA_CC_IS_GCN(cc) (cc > GGML_CUDA_CC_OFFSET_AMD && cc < GGML_CUDA_CC_CDNA) #define GGML_CUDA_CC_IS_CDNA(cc) (cc >= GGML_CUDA_CC_CDNA && cc < GGML_CUDA_CC_RDNA1) -#define GGML_CUDA_CC_QY1 210 -#define GGML_CUDA_CC_QY2 220 +// Moore Threads +#define GGML_CUDA_MUSA_ARCH_IS_QY1 (__MUSA_ARCH__ <= 210) + +#define GGML_CUDA_CC_QY1 (GGML_MUSA_CC_OFFSET_MTHREADS + 0x210) // MTT S80, MTT S3000 +#define GGML_CUDA_CC_QY2 (GGML_MUSA_CC_OFFSET_MTHREADS + 0x220) // MTT S4000 +#define GGML_CUDA_CC_NG (GGML_MUSA_CC_OFFSET_MTHREADS + 0x310) // TBD + +#define GGML_CUDA_CC_IS_MTHREADS(cc) (cc >= GGML_CUDA_CC_OFFSET_MTHREADS && cc < GGML_CUDA_CC_OFFSET_AMD) +#define GGML_CUDA_CC_IS_QY1(cc) (cc >= GGML_CUDA_CC_QY1 && cc < GGML_CUDA_CC_QY2) +#define GGML_CUDA_CC_IS_QY2(cc) (cc >= GGML_CUDA_CC_QY2 && cc < GGML_CUDA_CC_NEXT) +#define GGML_CUDA_CC_IS_NG(cc) (cc >= GGML_CUDA_CC_NG) #ifdef __CUDA_ARCH_LIST__ constexpr bool ggml_cuda_has_arch_impl(int) { @@ -209,21 +221,21 @@ typedef float2 dfloat2; #define CP_ASYNC_AVAILABLE #endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE -#if !defined(GGML_CUDA_NO_FA) && !(defined(GGML_USE_MUSA) && __MUSA_ARCH__ <= GGML_CUDA_CC_QY1) +#if !defined(GGML_CUDA_NO_FA) && !(defined(GGML_USE_MUSA) && GGML_CUDA_MUSA_ARCH_IS_QY1) #define FLASH_ATTN_AVAILABLE -#endif // !defined(GGML_CUDA_NO_FA) && !(defined(GGML_USE_MUSA) && __MUSA_ARCH__ <= GGML_CUDA_CC_QY1) +#endif // !defined(GGML_CUDA_NO_FA) && !(defined(GGML_USE_MUSA) && GGML_CUDA_MUSA_ARCH_IS_QY1) static bool fp16_available(const int cc) { return ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_PASCAL; } static bool fast_fp16_available(const int cc) { - return fp16_available(cc) && cc != 610; + return (GGML_CUDA_CC_IS_NVIDIA(cc) && fp16_available(cc) && cc != 610) || GGML_CUDA_CC_IS_AMD(cc); } // To be used for feature selection of external libraries, e.g. cuBLAS. static bool fast_fp16_hardware_available(const int cc) { - return cc >= GGML_CUDA_CC_PASCAL && cc != 610; + return (GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_PASCAL && cc != 610) || GGML_CUDA_CC_IS_AMD(cc); } // Any FP16 tensor core instructions are available for ggml code. 
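To make the vendor-offset encoding above concrete, here is a small standalone sketch that is not part of the patch itself: it mirrors the arithmetic ggml-cuda.cu uses later in this change (`GGML_CUDA_CC_OFFSET_MTHREADS + prop.major * 0x100 + prop.minor * 0x10`), with the literal offsets copied from the defines above, and shows how the range checks then classify the resulting value.

```cpp
// Standalone sketch (not part of the patch): how a Moore Threads (MUSA) device
// reporting major=2, minor=1 is encoded and classified. The offsets are the same
// literals as GGML_CUDA_CC_OFFSET_MTHREADS / GGML_CUDA_CC_OFFSET_AMD above.
#include <cstdio>

constexpr int OFFSET_MTHREADS = 0x0100000;
constexpr int OFFSET_AMD      = 0x1000000;

constexpr bool is_nvidia  (int cc) { return cc <  OFFSET_MTHREADS; }
constexpr bool is_mthreads(int cc) { return cc >= OFFSET_MTHREADS && cc < OFFSET_AMD; }

int main() {
    const int major = 2, minor = 1;                             // e.g. an MTT S80-class part
    const int cc = OFFSET_MTHREADS + major*0x100 + minor*0x10;  // 0x100210 == GGML_CUDA_CC_QY1
    std::printf("cc=0x%x nvidia=%d mthreads=%d\n", cc, is_nvidia(cc), is_mthreads(cc));
    return 0;
}
```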
@@ -231,20 +243,20 @@ static bool fp16_mma_available(const int cc) { #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) && !defined(GGML_HIP_ROCWMMA_FATTN) return false; #else - return cc < GGML_CUDA_CC_OFFSET_AMD && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA || - GGML_CUDA_CC_IS_CDNA(cc) || cc >= GGML_CUDA_CC_RDNA3; + return GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA || + GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc); #endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) && !defined(GGML_HIP_ROCWMMA_FATTN) } // To be used for feature selection of external libraries, e.g. cuBLAS. static bool fp16_mma_hardware_available(const int cc) { - return cc < GGML_CUDA_CC_OFFSET_AMD && cc >= GGML_CUDA_CC_VOLTA || - GGML_CUDA_CC_IS_CDNA(cc) || cc >= GGML_CUDA_CC_RDNA3; + return GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_VOLTA || + GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc); } // Volta technically had FP16 tensor cores but they work very differently compared to Turing and later. static bool new_mma_available(const int cc) { - return cc < GGML_CUDA_CC_OFFSET_AMD && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_TURING; + return GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_TURING; } static bool cp_async_available(const int cc) { diff --git a/ggml/src/ggml-cuda/fattn.cu b/ggml/src/ggml-cuda/fattn.cu index 973541893..8edc12649 100644 --- a/ggml/src/ggml-cuda/fattn.cu +++ b/ggml/src/ggml-cuda/fattn.cu @@ -253,7 +253,7 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst const int warp_size = ggml_cuda_info().devices[ggml_cuda_get_device()].warp_size; const enum ggml_prec prec = ggml_flash_attn_ext_get_prec(KQV); - if (cc >= GGML_CUDA_CC_OFFSET_AMD) { + if (GGML_CUDA_CC_IS_AMD(cc)) { #if defined(GGML_HIP_ROCWMMA_FATTN) if (fp16_mma_available(cc)) { ggml_cuda_flash_attn_ext_wmma_f16(ctx, dst); diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index b783310ef..10d461b77 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -264,9 +264,9 @@ static ggml_cuda_device_info ggml_cuda_init() { #elif defined(GGML_USE_MUSA) // FIXME: Ensure compatibility with varying warp sizes across different MUSA archs. info.devices[id].warp_size = 32; - // TODO: refine the .cc to reflect MUSA's actual CC capabilities info.devices[id].smpbo = prop.sharedMemPerBlockOptin; - info.devices[id].cc = 100*prop.major + 10*prop.minor; + info.devices[id].cc = GGML_CUDA_CC_OFFSET_MTHREADS + prop.major * 0x100; + info.devices[id].cc += prop.minor * 0x10; GGML_LOG_INFO(" Device %d: %s, compute capability %d.%d, VMM: %s\n", id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no"); #else @@ -1188,11 +1188,11 @@ static void ggml_cuda_op_mul_mat_cublas( // ldc == nrows of the matrix that cuBLAS writes into int64_t ldc = id == ctx.device ? 
ne0 : row_diff; - const int compute_capability = ggml_cuda_info().devices[id].cc; + const int cc = ggml_cuda_info().devices[id].cc; const bool use_fp16 = (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT; - if (compute_capability >= GGML_CUDA_CC_VOLTA && use_fp16) { + if (((cc >= GGML_CUDA_CC_VOLTA && GGML_CUDA_CC_IS_NVIDIA(cc)) || GGML_CUDA_CC_IS_AMD(cc)) && use_fp16) { // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32 ggml_cuda_pool_alloc src0_as_f16(ctx.pool(id)); if (src0->type != GGML_TYPE_F16) { @@ -1216,7 +1216,7 @@ static void ggml_cuda_op_mul_mat_cublas( CUBLAS_CHECK(cublasSetStream(ctx.cublas_handle(id), stream)); - if (GGML_CUDA_CC_IS_CDNA(compute_capability)) { + if (GGML_CUDA_CC_IS_CDNA(cc)) { const float alpha = 1.0f; const float beta = 0.0f; CUBLAS_CHECK( diff --git a/ggml/src/ggml-cuda/mmq.cu b/ggml/src/ggml-cuda/mmq.cu index 10f2ebb1c..510c1e9b2 100644 --- a/ggml/src/ggml-cuda/mmq.cu +++ b/ggml/src/ggml-cuda/mmq.cu @@ -28,7 +28,7 @@ void ggml_cuda_op_mul_mat_q( // Also its fixup needs to allocate a temporary buffer in the memory pool. // There are multiple parallel CUDA streams for src1_ncols != ne11 which would introduce a race condition for this buffer. const bool use_stream_k = ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA && - cc < GGML_CUDA_CC_OFFSET_AMD && src1_ncols == ne11; + GGML_CUDA_CC_IS_NVIDIA(cc) && src1_ncols == ne11; const mmq_args args = {src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stride00, src1_padded_row_size, src1_ncols, ne11, nrows_dst, use_stream_k}; switch (src0->type) { @@ -145,7 +145,7 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) { return true; #endif //GGML_CUDA_FORCE_MMQ - if (cc < GGML_CUDA_CC_OFFSET_AMD) { + if (GGML_CUDA_CC_IS_NVIDIA(cc)) { return !fp16_mma_hardware_available(cc) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE; } diff --git a/ggml/src/ggml-cuda/mmq.cuh b/ggml/src/ggml-cuda/mmq.cuh index f2aca1f20..4ea8b8d4b 100644 --- a/ggml/src/ggml-cuda/mmq.cuh +++ b/ggml/src/ggml-cuda/mmq.cuh @@ -90,7 +90,7 @@ struct tile_x_sizes { static int get_mmq_x_max_host(const int cc) { return new_mma_available(cc) ? 128 : - ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA && cc < GGML_CUDA_CC_OFFSET_AMD ? + ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA && GGML_CUDA_CC_IS_NVIDIA(cc) ? #ifdef GGML_CUDA_FORCE_MMQ 128 : 64; #else @@ -123,8 +123,8 @@ static constexpr __device__ int get_mmq_x_max_device() { } static int get_mmq_y_host(const int cc) { - return cc >= GGML_CUDA_CC_OFFSET_AMD ? (GGML_CUDA_CC_IS_RDNA1(cc) ? 64 : 128) : - (ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA ? 128 : 64); + return GGML_CUDA_CC_IS_AMD(cc) ? (GGML_CUDA_CC_IS_RDNA1(cc) ? 64 : 128) : + ((ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA && GGML_CUDA_CC_IS_NVIDIA(cc)) ? 
128 : 64); } static constexpr __device__ int get_mmq_y_device() { @@ -2772,14 +2772,14 @@ static void launch_mul_mat_q(ggml_backend_cuda_context & ctx, const mmq_args & a const int shmem = mmq_get_shmem(mmq_x, mmq_y, cc); -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && !defined(GGML_USE_MUSA) static bool shmem_limit_raised[GGML_CUDA_MAX_DEVICES] = {false}; if (!shmem_limit_raised[id]) { CUDA_CHECK(cudaFuncSetAttribute(mul_mat_q, cudaFuncAttributeMaxDynamicSharedMemorySize, shmem)); CUDA_CHECK(cudaFuncSetAttribute(mul_mat_q, cudaFuncAttributeMaxDynamicSharedMemorySize, shmem)); shmem_limit_raised[id] = true; } -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && !defined(GGML_USE_MUSA) const int nty = (args.ne01 + mmq_y - 1) / mmq_y; const int ntx = (args.ne11 + mmq_x - 1) / mmq_x; @@ -2832,7 +2832,7 @@ void mul_mat_q_case(ggml_backend_cuda_context & ctx, const mmq_args & args, cuda const int mmq_x_max = get_mmq_x_max_host(cc); const int mmq_y = get_mmq_y_host(cc); const int block_num_y = (args.ne01 + mmq_y - 1) / mmq_y; - const bool use_stream_k = ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA && cc < GGML_CUDA_CC_OFFSET_AMD; + const bool use_stream_k = ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA && GGML_CUDA_CC_IS_NVIDIA(cc); int mmq_x_best = 0; int nparts_best = INT_MAX; From ba932dfb50cc694645b1a148c72f8c06ee080b17 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 22 Mar 2025 16:23:26 +0200 Subject: [PATCH 03/19] ggml : fix quantized cpy op (#12310) * ggml : fix quantized cpy op ggml-ci * tests : add cpy tests for all types ggml-ci * tests : add BF16 copy tests ggml-ci * tests : fix loop for same-type copy ggml-ci * tests : add option to permute the dst tensor ggml-ci --- ggml/src/ggml-cpu/ggml-cpu.c | 58 +++++++++++++++++++----------------- tests/test-backend-ops.cpp | 40 ++++++++++++++++++------- 2 files changed, 61 insertions(+), 37 deletions(-) diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index 75dc96b47..2dbe83558 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -3110,17 +3110,17 @@ static void ggml_compute_forward_dup_same_cont( const int ith = params->ith; // thread index const int nth = params->nth; // number of threads - // parallelize by elements - const int ne = ggml_nelements(dst); - const int dr = (ne + nth - 1) / nth; - const int ie0 = dr * ith; - const int ie1 = MIN(ie0 + dr, ne); + // parallelize by blocks + const int nk = ggml_nelements(src0)/ggml_blck_size(src0->type); + const int dr = (nk + nth - 1) / nth; + const int k0 = dr * ith; + const int k1 = MIN(k0 + dr, nk); - if (ie0 < ie1) { + if (k0 < k1) { memcpy( - ((char *) dst->data + ie0*nb0), - ((char *) src0->data + ie0*nb0), - (ie1 - ie0) * nb0); + ((char *) dst->data + k0*nb0), + ((char *) src0->data + k0*nb0), + (k1 - k0) * nb0); } } @@ -4055,7 +4055,6 @@ static void ggml_compute_forward_dup_f32( static void ggml_compute_forward_dup_bytes( const struct ggml_compute_params * params, struct ggml_tensor * dst) { - const struct ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); @@ -4069,10 +4068,10 @@ static void ggml_compute_forward_dup_bytes( } const size_t type_size = ggml_type_size(src0->type); + const int ith = params->ith; // thread index const int nth = params->nth; // number of threads - // parallelize by rows 
const int nr = ne01; // number of rows per thread @@ -4082,10 +4081,10 @@ static void ggml_compute_forward_dup_bytes( const int ir1 = MIN(ir0 + dr, nr); if (src0->type == dst->type && - ne00 == ne0 && + ggml_are_same_shape(src0, dst) && nb00 == type_size && nb0 == type_size) { // copy by rows - const size_t rs = ne00 * type_size; + const size_t rs = ggml_row_size(src0->type, ne00); for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = ir0; i01 < ir1; i01++) { @@ -4140,17 +4139,20 @@ static void ggml_compute_forward_dup_bytes( } // dst counters - - int64_t i10 = 0; + int64_t k10 = 0; int64_t i11 = 0; int64_t i12 = 0; int64_t i13 = 0; + // number of blocks in a row + const int64_t nk00 = ne00 / ggml_blck_size(src0->type); + const int64_t nk0 = ne0 / ggml_blck_size(dst->type); + for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { - i10 += ne00 * ir0; - while (i10 >= ne0) { - i10 -= ne0; + k10 += nk00 * ir0; + while (k10 >= nk0) { + k10 -= nk0; if (++i11 == ne1) { i11 = 0; if (++i12 == ne2) { @@ -4162,14 +4164,14 @@ static void ggml_compute_forward_dup_bytes( } } for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); + for (int64_t k00 = 0; k00 < nk00; k00++) { + const char * src0_ptr = ((char *) src0->data + k00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); + char * dst_ptr = ((char *) dst->data + k10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); memcpy(dst_ptr, src0_ptr, type_size); - if (++i10 == ne0) { - i10 = 0; + if (++k10 == nk0) { + k10 = 0; if (++i11 == ne1) { i11 = 0; if (++i12 == ne2) { @@ -4182,9 +4184,9 @@ static void ggml_compute_forward_dup_bytes( } } } - i10 += ne00 * (ne01 - ir1); - while (i10 >= ne0) { - i10 -= ne0; + k10 += nk00 * (ne01 - ir1); + while (k10 >= nk0) { + k10 -= nk0; if (++i11 == ne1) { i11 = 0; if (++i12 == ne2) { @@ -14308,7 +14310,9 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm } // extra_buffer op? 
- if (ggml_cpu_extra_compute_forward(params, tensor)) return; + if (ggml_cpu_extra_compute_forward(params, tensor)) { + return; + } switch (tensor->op) { case GGML_OP_DUP: diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 9d7847d21..ebc32d791 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -1463,11 +1463,13 @@ struct test_cpy : public test_case { const ggml_type type_src; const ggml_type type_dst; const std::array ne; - const std::array permute; + const std::array permute_src; + const std::array permute_dst; bool _src_use_permute; + bool _dst_use_permute; std::string vars() override { - return VARS_TO_STR4(type_src, type_dst, ne, permute); + return VARS_TO_STR5(type_src, type_dst, ne, permute_src, permute_dst); } double max_nmse_err() override { @@ -1480,9 +1482,11 @@ struct test_cpy : public test_case { test_cpy(ggml_type type_src = GGML_TYPE_F32, ggml_type type_dst = GGML_TYPE_F32, std::array ne = {10, 10, 10, 1}, - std::array permute = {0, 0, 0, 0}) - : type_src(type_src), type_dst(type_dst), ne(ne), permute(permute), - _src_use_permute(permute[0] + permute[1] + permute[2] + permute[3] > 0) {} + std::array permute_src = {0, 0, 0, 0}, + std::array permute_dst = {0, 0, 0, 0}) + : type_src(type_src), type_dst(type_dst), ne(ne), permute_src(permute_src), permute_dst(permute_dst), + _src_use_permute(permute_src[0] + permute_src[1] + permute_src[2] + permute_src[3] > 0), + _dst_use_permute(permute_dst[0] + permute_dst[1] + permute_dst[2] + permute_dst[3] > 0) {} ggml_tensor * build_graph(ggml_context * ctx) override { ggml_tensor * src = ggml_new_tensor(ctx, type_src, 4, ne.data()); @@ -1490,13 +1494,18 @@ struct test_cpy : public test_case { ggml_set_name(src, "src"); if (_src_use_permute) { - src = ggml_permute(ctx, src, permute[0], permute[1], permute[2], permute[3]); + src = ggml_permute(ctx, src, permute_src[0], permute_src[1], permute_src[2], permute_src[3]); ggml_set_name(src, "src_permuted"); } - ggml_tensor* dst = ggml_new_tensor(ctx, type_dst, 4, src->ne); + ggml_tensor * dst = ggml_new_tensor(ctx, type_dst, 4, src->ne); ggml_set_name(dst, "dst"); + if (_dst_use_permute) { + dst = ggml_permute(ctx, dst, permute_dst[0], permute_dst[1], permute_dst[2], permute_dst[3]); + ggml_set_name(dst, "dst_permuted"); + } + ggml_tensor * out = ggml_cpy(ctx, src, dst); ggml_set_name(out, "out"); @@ -4004,14 +4013,25 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_set(GGML_TYPE_I32, GGML_TYPE_I32, {6, 5, 4, 3}, dim)); } - for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_F32}) { + // same-type copy + for (ggml_type type : all_types) { + const auto nk = ggml_blck_size(type); + + for (int k = 1; k < 4; ++k) { + test_cases.emplace_back(new test_cpy(type, type, {k*nk, 2, 3, 4})); + test_cases.emplace_back(new test_cpy(type, type, {k*nk, 2, 3, 4}, {0, 2, 1, 3})); + test_cases.emplace_back(new test_cpy(type, type, {k*nk, 2, 3, 4}, {0, 3, 1, 2}, {0, 2, 1, 3})); + } + } + + for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_BF16, GGML_TYPE_F32}) { for (ggml_type type_dst : all_types) { test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 4, 4, 4})); test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {0, 2, 1, 3})); // cpy by rows } } - for (ggml_type type_dst : {GGML_TYPE_F32}) { - for (ggml_type type_src : all_types) { + for (ggml_type type_src : all_types) { + for (ggml_type type_dst : {GGML_TYPE_F32}) { test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 4, 4, 4})); 
test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {0, 2, 1, 3})); // cpy by rows } From fbdfefe74e736f1a3687283c25ac21b11ba07b2e Mon Sep 17 00:00:00 2001 From: Xuan-Son Nguyen Date: Sat, 22 Mar 2025 23:28:19 +0100 Subject: [PATCH 04/19] llama : gemma3 : use output tensor if it exists in model weight (#12506) * llama : gemma3 : use output tensor if it exists in model weight * also add to the llm_tensor_names --- gguf-py/gguf/constants.py | 1 + src/llama-arch.cpp | 1 + src/llama-model.cpp | 7 ++++++- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index cc48913d9..13cca7ab0 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -1113,6 +1113,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { ], MODEL_ARCH.GEMMA3: [ MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT, MODEL_TENSOR.OUTPUT_NORM, MODEL_TENSOR.ATTN_Q, MODEL_TENSOR.ATTN_Q_NORM, diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index 9debb56cc..8664f8963 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -778,6 +778,7 @@ static const std::map> LLM_TENSOR_N { { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 26ac5e99b..0ae754154 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -2571,7 +2571,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) { // output output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); - output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } for (int i = 0; i < n_layer; ++i) { auto & layer = layers[i]; From 18b663d8e4ef352a9a15ff15d695fc3258801d60 Mon Sep 17 00:00:00 2001 From: Lars Sonchocky-Helldorf Date: Sun, 23 Mar 2025 09:21:48 +0100 Subject: [PATCH 05/19] install : add macports (#12518) MacPorts section added --- docs/install.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/install.md b/docs/install.md index 0e23a2c9e..4971c1828 100644 --- a/docs/install.md +++ b/docs/install.md @@ -9,6 +9,13 @@ brew install llama.cpp ``` The formula is automatically updated with new `llama.cpp` releases. More info: https://github.com/ggml-org/llama.cpp/discussions/7668 +## MacPorts + +```sh +sudo port install llama.cpp +``` +see also: https://ports.macports.org/port/llama.cpp/details/ + ## Nix On Mac and Linux, the Nix package manager can be used via From 77f9c6bbe55fccd9ea567794024cb80943947901 Mon Sep 17 00:00:00 2001 From: Marius Gerdes <141485318+mglambda@users.noreply.github.com> Date: Sun, 23 Mar 2025 19:30:26 +0100 Subject: [PATCH 06/19] server : Add verbose output to OAI compatible chat endpoint. (#12246) Add verbose output to server_task_result_cmpl_final::to_json_oaicompat_chat_stream, making it conform with server_task_result_cmpl_final::to_json_oaicompat_chat, as well as the other to_json methods. 
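In practice this means that, when verbose output is enabled, each OAI-compatible streamed chunk carries an extra `__verbose` member next to the standard fields. Below is a rough sketch of the resulting shape, using nlohmann::json as server.cpp does; the keys other than `__verbose` are assumptions about the usual OAI chunk layout, and the `__verbose` contents are placeholders, so neither is taken verbatim from this diff.

```cpp
// Rough sketch only: approximate shape of one streamed chunk once the server
// adds ret["__verbose"]. Keys other than "__verbose" are assumptions about the
// OAI-compatible layout, and the "__verbose" contents are placeholders.
#include <nlohmann/json.hpp>
#include <iostream>

int main() {
    nlohmann::json choice;
    choice["index"] = 0;
    choice["delta"]["content"] = "Hello";

    nlohmann::json chunk;
    chunk["object"]  = "chat.completion.chunk";
    chunk["choices"] = nlohmann::json::array({choice});

    // Mirrors the guarded line added by this patch:
    //     if (verbose) { ret["__verbose"] = to_json_non_oaicompat(); }
    chunk["__verbose"] = {{"prompt", "..."}, {"tokens_predicted", 1}};

    std::cout << chunk.dump(2) << std::endl;
    return 0;
}
```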
--- examples/server/server.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index c2f1afeca..18caa9127 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -830,6 +830,11 @@ struct server_task_result_cmpl_final : server_task_result { ret.push_back({"timings", timings.to_json()}); } + // extra fields for debugging purposes + if (verbose) { + ret["__verbose"] = to_json_non_oaicompat(); + } + return ret; } }; From 9b169a4d4e01af7bc07a6981b53b27c18c9470d8 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Mon, 24 Mar 2025 01:56:17 -0500 Subject: [PATCH 07/19] vulkan: fix mul_mat_vec failure in backend tests (#12529) The OOB calculation could be wrong if the last iteration was during one of the unrolled loops. Adjust the unrolling counts to avoid this. Add a couple new backend tests that hit this failure on NVIDIA GPUs. --- .../vulkan-shaders/mul_mat_vec.comp | 20 +++++++++++++++++++ tests/test-backend-ops.cpp | 2 ++ 2 files changed, 22 insertions(+) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp index 31ecd9f81..775b48cd0 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp @@ -105,6 +105,16 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { int unroll_count = 4; uint unrolled_iters = num_iters & ~(unroll_count - 1); +#if K_PER_ITER == 2 + // If the K dimension is odd, we need lastiter==true on the last iteration + // so OOB is computed correctly. Skip some unrolling to make that happen. + if ((p.ncols & 1) != 0 && + unrolled_iters == num_iters && + unrolled_iters > 0) { + unrolled_iters -= unroll_count; + } +#endif + uint i = 0; while (i < unrolled_iters) { // Manually partially unroll the loop @@ -113,8 +123,18 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { i++; } } + unroll_count = 2; unrolled_iters = num_iters & ~(unroll_count - 1); + +#if K_PER_ITER == 2 + if ((p.ncols & 1) != 0 && + unrolled_iters == num_iters && + unrolled_iters > 0) { + unrolled_iters -= unroll_count; + } +#endif + while (i < unrolled_iters) { // Manually partially unroll the loop [[unroll]] for (uint k = 0; k < unroll_count; ++k) { diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index ebc32d791..28f860a7f 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -4204,6 +4204,8 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 83, 2, 64, { 8, 1}, {4, 1})); test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 64, 45, 128, { 8, 1}, {4, 1})); test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128, 45, 64, { 8, 1}, {4, 1})); + test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 1056, 1, 193, {1, 1}, {4, 1}, {0, 2, 1, 3})); + test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 1056, 1, 67, {1, 1}, {4, 1}, {0, 2, 1, 3})); for (auto bs : {1,2,4,8}) { for (auto nr : {1,4}) { From c54f6b7988b63714b87b0390e01a1e69aee79a12 Mon Sep 17 00:00:00 2001 From: Prajwal B Mehendarkar Date: Mon, 24 Mar 2025 15:47:10 +0530 Subject: [PATCH 08/19] mmap : skip resource limit checks on AIX (#12541) --- src/llama-mmap.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llama-mmap.cpp b/src/llama-mmap.cpp index 3970b7485..9da97f1bc 100644 --- 
a/src/llama-mmap.cpp +++ b/src/llama-mmap.cpp @@ -476,7 +476,7 @@ struct llama_mlock::impl { char* errmsg = std::strerror(errno); bool suggest = (errno == ENOMEM); -#if defined(TARGET_OS_VISION) || defined(TARGET_OS_TV) +#if defined(TARGET_OS_VISION) || defined(TARGET_OS_TV) || defined(_AIX) // visionOS/tvOS dont't support RLIMIT_MEMLOCK // Skip resource limit checks on visionOS/tvOS suggest = false; From 7ea75035b67f44c22ed7039967f718011fd35ce5 Mon Sep 17 00:00:00 2001 From: R0CKSTAR Date: Mon, 24 Mar 2025 18:28:34 +0800 Subject: [PATCH 09/19] CUDA: Fix clang warnings (#12540) Signed-off-by: Xiaodong Ye --- ggml/src/ggml-cuda/common.cuh | 4 ++-- ggml/src/ggml-cuda/ggml-cuda.cu | 2 +- ggml/src/ggml-cuda/mmq.cu | 4 ++-- ggml/src/ggml-cuda/mmq.cuh | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index 6b5cd32a4..954ff5f16 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -243,14 +243,14 @@ static bool fp16_mma_available(const int cc) { #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) && !defined(GGML_HIP_ROCWMMA_FATTN) return false; #else - return GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA || + return (GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA) || GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc); #endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) && !defined(GGML_HIP_ROCWMMA_FATTN) } // To be used for feature selection of external libraries, e.g. cuBLAS. static bool fp16_mma_hardware_available(const int cc) { - return GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_VOLTA || + return (GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_VOLTA) || GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc); } diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 10d461b77..6dd5dcb85 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -1192,7 +1192,7 @@ static void ggml_cuda_op_mul_mat_cublas( const bool use_fp16 = (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT; - if (((cc >= GGML_CUDA_CC_VOLTA && GGML_CUDA_CC_IS_NVIDIA(cc)) || GGML_CUDA_CC_IS_AMD(cc)) && use_fp16) { + if (((GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_VOLTA) || GGML_CUDA_CC_IS_AMD(cc)) && use_fp16) { // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32 ggml_cuda_pool_alloc src0_as_f16(ctx.pool(id)); if (src0->type != GGML_TYPE_F16) { diff --git a/ggml/src/ggml-cuda/mmq.cu b/ggml/src/ggml-cuda/mmq.cu index 510c1e9b2..2c19485d5 100644 --- a/ggml/src/ggml-cuda/mmq.cu +++ b/ggml/src/ggml-cuda/mmq.cu @@ -27,8 +27,8 @@ void ggml_cuda_op_mul_mat_q( // The stream-k decomposition is only faster for recent NVIDIA GPUs. // Also its fixup needs to allocate a temporary buffer in the memory pool. // There are multiple parallel CUDA streams for src1_ncols != ne11 which would introduce a race condition for this buffer. 
- const bool use_stream_k = ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA && - GGML_CUDA_CC_IS_NVIDIA(cc) && src1_ncols == ne11; + const bool use_stream_k = GGML_CUDA_CC_IS_NVIDIA(cc) && + ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA && src1_ncols == ne11; const mmq_args args = {src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stride00, src1_padded_row_size, src1_ncols, ne11, nrows_dst, use_stream_k}; switch (src0->type) { diff --git a/ggml/src/ggml-cuda/mmq.cuh b/ggml/src/ggml-cuda/mmq.cuh index 4ea8b8d4b..ee0115425 100644 --- a/ggml/src/ggml-cuda/mmq.cuh +++ b/ggml/src/ggml-cuda/mmq.cuh @@ -90,7 +90,7 @@ struct tile_x_sizes { static int get_mmq_x_max_host(const int cc) { return new_mma_available(cc) ? 128 : - ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA && GGML_CUDA_CC_IS_NVIDIA(cc) ? + GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA ? #ifdef GGML_CUDA_FORCE_MMQ 128 : 64; #else @@ -124,7 +124,7 @@ static constexpr __device__ int get_mmq_x_max_device() { static int get_mmq_y_host(const int cc) { return GGML_CUDA_CC_IS_AMD(cc) ? (GGML_CUDA_CC_IS_RDNA1(cc) ? 64 : 128) : - ((ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA && GGML_CUDA_CC_IS_NVIDIA(cc)) ? 128 : 64); + ((GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA) ? 128 : 64); } static constexpr __device__ int get_mmq_y_device() { @@ -2832,7 +2832,7 @@ void mul_mat_q_case(ggml_backend_cuda_context & ctx, const mmq_args & args, cuda const int mmq_x_max = get_mmq_x_max_host(cc); const int mmq_y = get_mmq_y_host(cc); const int block_num_y = (args.ne01 + mmq_y - 1) / mmq_y; - const bool use_stream_k = ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA && GGML_CUDA_CC_IS_NVIDIA(cc); + const bool use_stream_k = GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA; int mmq_x_best = 0; int nparts_best = INT_MAX; From 00d53800e00bb22a26bf710fa6bd1150e412cc1d Mon Sep 17 00:00:00 2001 From: compilade Date: Mon, 24 Mar 2025 06:47:24 -0400 Subject: [PATCH 10/19] llama-vocab : add SuperBPE pre-tokenizer (#12532) --- convert_hf_to_gguf.py | 3 +++ convert_hf_to_gguf_update.py | 1 + include/llama.h | 1 + src/llama-vocab.cpp | 10 ++++++++++ 4 files changed, 15 insertions(+) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index d21edce16..d9fa57027 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -705,6 +705,9 @@ class Model: if chkhsh == "ccc2ef013c104be7bae2965776d611e1d7a8a2a9c547dd93a682c9a9fc80352e": # ref: https://huggingface.co/Xenova/gpt-4o res = "gpt-4o" + if chkhsh == "7dec86086fcc38b66b7bc1575a160ae21cf705be7718b9d5598190d7c12db76f": + # ref: https://huggingface.co/UW/OLMo2-8B-SuperBPE-t180k + res = "superbpe" if res is None: logger.warning("\n") diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py index 07d3ce0e4..ca90cf592 100755 --- a/convert_hf_to_gguf_update.py +++ b/convert_hf_to_gguf_update.py @@ -110,6 +110,7 @@ models = [ {"name": "deepseek-v3", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/DeepSeek-V3"}, {"name": "deepseek-r1-qwen", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"}, {"name": "gpt-4o", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Xenova/gpt-4o", }, + {"name": "superbpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/UW/OLMo2-8B-SuperBPE-t180k", }, ] diff --git a/include/llama.h b/include/llama.h 
index 6a44be404..25a9f8278 100644 --- a/include/llama.h +++ b/include/llama.h @@ -107,6 +107,7 @@ extern "C" { LLAMA_VOCAB_PRE_TYPE_MINERVA = 27, LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28, LLAMA_VOCAB_PRE_TYPE_GPT4O = 29, + LLAMA_VOCAB_PRE_TYPE_SUPERBPE = 30, }; enum llama_rope_type { diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp index a708d8b88..2ddc8108f 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp @@ -400,6 +400,12 @@ struct llm_tokenizer_bpe : llm_tokenizer { "[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))*((?=[\\p{L}])([^A-Z]))+(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])?|[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))+((?=[\\p{L}])([^A-Z]))*(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])?|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n/]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+", }; break; + case LLAMA_VOCAB_PRE_TYPE_SUPERBPE: + regex_exprs = { + "\\p{N}+", + "(?=(\\d{3})+(?!\\d))", + }; + break; default: // default regex for BPE tokenization pre-processing regex_exprs = { @@ -1604,6 +1610,10 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { tokenizer_pre == "gpt-4o") { pre_type = LLAMA_VOCAB_PRE_TYPE_GPT4O; clean_spaces = false; + } else if ( + tokenizer_pre == "superbpe") { + pre_type = LLAMA_VOCAB_PRE_TYPE_SUPERBPE; + clean_spaces = false; } else { throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str())); } From 3361e2deba0f24942877559b35c430dec68528c3 Mon Sep 17 00:00:00 2001 From: Tei Home Date: Mon, 24 Mar 2025 19:02:26 +0800 Subject: [PATCH 11/19] docs: update: improve the Fedoa CUDA guide (#12536) * docs: update fedora-cuda guide - Rename and place into Backend Folder. - Update Host-Supplied Packages. - Expand Recommended Users Section. * docs: improve the flow of CUDA-FEDORA.md --- .../CUDA-FEDORA.md} | 65 +++++++++++-------- docs/build.md | 10 +-- 2 files changed, 45 insertions(+), 30 deletions(-) rename docs/{cuda-fedora.md => backend/CUDA-FEDORA.md} (78%) diff --git a/docs/cuda-fedora.md b/docs/backend/CUDA-FEDORA.md similarity index 78% rename from docs/cuda-fedora.md rename to docs/backend/CUDA-FEDORA.md index 75cd2b499..1508faf77 100644 --- a/docs/cuda-fedora.md +++ b/docs/backend/CUDA-FEDORA.md @@ -14,9 +14,7 @@ In this guide we setup [Nvidia CUDA](https://docs.nvidia.com/cuda/) in a toolbox - [Creating a Fedora Toolbox Environment](#creating-a-fedora-toolbox-environment) - [Installing Essential Development Tools](#installing-essential-development-tools) - [Adding the CUDA Repository](#adding-the-cuda-repository) -- [Installing `nvidia-driver-libs`](#installing-nvidia-driver-libs) -- [Manually Resolving Package Conflicts](#manually-resolving-package-conflicts) -- [Finalizing the Installation of `nvidia-driver-libs`](#finalizing-the-installation-of-nvidia-driver-libs) +- [Installing Nvidia Driver Libraries](#installing-nvidia-driver-libraries) - [Installing the CUDA Meta-Package](#installing-the-cuda-meta-package) - [Configuring the Environment](#configuring-the-environment) - [Verifying the Installation](#verifying-the-installation) @@ -67,7 +65,7 @@ This guide focuses on Fedora hosts, but with small adjustments, it can work for sudo dnf distro-sync ``` -2. **Install the Default Text Editor (Optional):** +2. 
**Install Vim, the default text editor (Optional):** ```bash sudo dnf install vim-default-editor --allowerasing ``` @@ -97,36 +95,48 @@ After adding the repository, synchronize the package manager again: sudo dnf distro-sync ``` -## Installing `nvidia-driver-libs` and `nvidia-driver-cuda-libs` +## Installing Nvidia Driver Libraries -We need to detect if the host is supplying the [NVIDIA driver libraries into the toolbox](https://github.com/containers/toolbox/blob/main/src/pkg/nvidia/nvidia.go). +First, we need to detect if the host is supplying the [NVIDIA driver libraries into the toolbox](https://github.com/containers/toolbox/blob/main/src/pkg/nvidia/nvidia.go): ```bash ls -la /usr/lib64/libcuda.so.1 ``` +### If *`libcuda.so.1`* is missing: + +``` +ls: cannot access '/usr/lib64/libcuda.so.1': No such file or directory +``` + **Explanation:** +The host does not supply the CUDA drivers, **install them now:** -- `nvidia-driver-libs` and `nvidia-driver-cuda-libs` contains necessary NVIDIA driver libraries required by CUDA, - on hosts with NVIDIA drivers installed the Fedora Container will supply the host libraries. - -### Install Nvidia Driver Libraries on Guest (if `libcuda.so.1` was NOT found). +#### Install the Nvidia Driver Libraries on Guest: ```bash -sudo dnf install nvidia-driver-libs nvidia-driver-cuda-libs +sudo dnf install nvidia-driver-cuda nvidia-driver-libs nvidia-driver-cuda-libs nvidia-persistenced ``` -### Manually Updating the RPM database for host-supplied NVIDIA drivers (if `libcuda.so.1` was found). +### If *`libcuda.so.1`* exists: +``` +lrwxrwxrwx. 1 root root 21 Mar 24 11:26 /usr/lib64/libcuda.so.1 -> libcuda.so.570.133.07 +``` -If the installation fails due to conflicts, we'll manually download and install the required packages, excluding conflicting files. +**Explanation:** +The host is supplying the CUDA drivers, **we need to update the guest RPM Database accordingly:** -#### 1. Download `nvidia-driver-libs` and `nvidia-driver-cuda-libs` RPM's (with dependencies) +#### Update the Toolbox RPM Database to include the Host-Supplied Libraries: + +Note: we do not actually install the libraries, we just update the DB so that the guest system knows they are supplied by the host. + +##### 1. Download `nvidia-` parts that are supplied by the host RPM's (with dependencies) ```bash -sudo dnf download --destdir=/tmp/nvidia-driver-libs --resolve --arch x86_64 nvidia-driver-libs nvidia-driver-cuda-libs +sudo dnf download --destdir=/tmp/nvidia-driver-libs --resolve --arch x86_64 nvidia-driver-cuda nvidia-driver-libs nvidia-driver-cuda-libs nvidia-persistenced ``` -#### 2. Update the RPM database to assume the installation of these packages. +##### 2. Update the RPM database to assume the installation of these packages. ```bash sudo rpm --install --verbose --hash --justdb /tmp/nvidia-driver-libs/* ``` **Note:** -- The `--justdb` option only updates the RPM database, without touching the filesystem. +- The `--justdb` option only updates the RPM database, without touching the filesystem elsewhere. -#### Finalizing the Installation of `nvidia-driver-libs` and `nvidia-driver-cuda-libs` +##### Check that the RPM Database has been correctly updated: + +**Note:** This is the same command as in the *"Install the Nvidia Driver Libraries on Guest"* for if *`libcuda.so.1`* was missing.
-After manually installing the dependencies, run:

 ```bash
-sudo dnf install nvidia-driver-libs nvidia-driver-cuda-libs
+sudo dnf install nvidia-driver-cuda nvidia-driver-libs nvidia-driver-cuda-libs nvidia-persistenced
 ```

-You should receive a message indicating the package is already installed:
+*(this time it will not install anything, as the database thinks that these packages are already installed)*

 ```
 Updating and loading repositories:
 Repositories loaded.
-Package "nvidia-driver-libs-3:570.86.10-1.fc41.x86_64" is already installed.
-Package "nvidia-driver-cuda-libs-3:570.86.10-1.fc41.x86_64" is already installed.
+Package "nvidia-driver-cuda-3:570.124.06-1.fc41.x86_64" is already installed.
+Package "nvidia-driver-libs-3:570.124.06-1.fc41.x86_64" is already installed.
+Package "nvidia-driver-cuda-libs-3:570.124.06-1.fc41.x86_64" is already installed.
+Package "nvidia-persistenced-3:570.124.06-1.fc41.x86_64" is already installed.
 Nothing to do.
 ```

@@ -207,9 +220,9 @@ You should see output similar to:
 ```
 nvcc: NVIDIA (R) Cuda compiler driver
 Copyright (c) 2005-2025 NVIDIA Corporation
-Built on Wed_Jan_15_19:20:09_PST_2025
-Cuda compilation tools, release 12.8, V12.8.61
-Build cuda_12.8.r12.8/compiler.35404655_0
+Built on Fri_Feb_21_20:23:50_PST_2025
+Cuda compilation tools, release 12.8, V12.8.93
+Build cuda_12.8.r12.8/compiler.35583870_0
 ```

 This output confirms that the CUDA compiler is accessible and indicates the installed version.

diff --git a/docs/build.md b/docs/build.md
index 2e3975c14..7b5503a1f 100644
--- a/docs/build.md
+++ b/docs/build.md
@@ -132,12 +132,14 @@ You may find the official downloads here: [NVIDIA developer site](https://develo

 #### Compile and run inside a Fedora Toolbox Container

-We also have a [guide](./cuda-fedora.md) for setting up CUDA toolkit in a Fedora [toolbox container](https://containertoolbx.org/).
+We also have a [guide](./backend/CUDA-FEDORA.md) for setting up CUDA toolkit in a Fedora [toolbox container](https://containertoolbx.org/).

 **Recommended for:**
-
-- ***Particularly*** *convenient* for users of [Atomic Desktops for Fedora](https://fedoraproject.org/atomic-desktops/); such as: [Silverblue](https://fedoraproject.org/atomic-desktops/silverblue/) and [Kinoite](https://fedoraproject.org/atomic-desktops/kinoite/).
-- Toolbox is installed by default: [Fedora Workstation](https://fedoraproject.org/workstation/) or [Fedora KDE Plasma Desktop](https://fedoraproject.org/spins/kde).
+- ***Necessary*** for users of [Atomic Desktops for Fedora](https://fedoraproject.org/atomic-desktops/); such as: [Silverblue](https://fedoraproject.org/atomic-desktops/silverblue/) and [Kinoite](https://fedoraproject.org/atomic-desktops/kinoite/).
+  - (there are no supported CUDA packages for these systems)
+- ***Necessary*** for users whose host is not a [Supported Nvidia CUDA Release Platform](https://developer.nvidia.com/cuda-downloads).
+  - (for example, you may have [Fedora 42 Beta](https://fedoramagazine.org/announcing-fedora-linux-42-beta/) as your host operating system)
+- ***Convenient*** for those running [Fedora Workstation](https://fedoraproject.org/workstation/) or [Fedora KDE Plasma Desktop](https://fedoraproject.org/spins/kde) who want to keep their host system clean.
- *Optionally* toolbox packages are available: [Arch Linux](https://archlinux.org/), [Red Hat Enterprise Linux >= 8.5](https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux), or [Ubuntu](https://ubuntu.com/download) From 48d7021c61ceda6fcf1a7294d2115b8e1a53ae95 Mon Sep 17 00:00:00 2001 From: Akarshan Biswas Date: Mon, 24 Mar 2025 18:28:32 +0530 Subject: [PATCH 12/19] CI: fix SYCL build (#12546) --- ci/run.sh | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/ci/run.sh b/ci/run.sh index 9fc19c89d..edf580353 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -826,8 +826,10 @@ if [ -z ${GG_BUILD_LOW_PERF} ]; then fi ret=0 - -test $ret -eq 0 && gg_run ctest_debug +if [ -z ${GG_BUILD_SYCL} ]; then + # SYCL build breaks with debug build flags + test $ret -eq 0 && gg_run ctest_debug +fi test $ret -eq 0 && gg_run ctest_release if [ -z ${GG_BUILD_LOW_PERF} ]; then @@ -835,7 +837,9 @@ if [ -z ${GG_BUILD_LOW_PERF} ]; then test $ret -eq 0 && gg_run rerank_tiny if [ -z ${GG_BUILD_CLOUD} ] || [ ${GG_BUILD_EXTRA_TESTS_0} ]; then - test $ret -eq 0 && gg_run test_scripts_debug + if [ -z ${GG_BUILD_SYCL} ]; then + test $ret -eq 0 && gg_run test_scripts_debug + fi test $ret -eq 0 && gg_run test_scripts_release fi @@ -846,7 +850,9 @@ if [ -z ${GG_BUILD_LOW_PERF} ]; then test $ret -eq 0 && gg_run pythia_2_8b #test $ret -eq 0 && gg_run open_llama_7b_v2 fi - test $ret -eq 0 && gg_run ctest_with_model_debug + if [ -z ${GG_BUILD_SYCL} ]; then + test $ret -eq 0 && gg_run ctest_with_model_debug + fi test $ret -eq 0 && gg_run ctest_with_model_release fi fi From 2b65ae30299b9c67e25c51ee567e9a2ef22279ab Mon Sep 17 00:00:00 2001 From: lhez Date: Mon, 24 Mar 2025 09:20:47 -0700 Subject: [PATCH 13/19] opencl: simplify kernel embedding logic in cmakefile (#12503) Co-authored-by: Max Krasnyansky --- ggml/src/ggml-opencl/CMakeLists.txt | 160 +++++++--------------------- 1 file changed, 41 insertions(+), 119 deletions(-) diff --git a/ggml/src/ggml-opencl/CMakeLists.txt b/ggml/src/ggml-opencl/CMakeLists.txt index 59a208fe9..7efb51c8e 100644 --- a/ggml/src/ggml-opencl/CMakeLists.txt +++ b/ggml/src/ggml-opencl/CMakeLists.txt @@ -25,124 +25,46 @@ endif () if (GGML_OPENCL_EMBED_KERNELS) add_compile_definitions(GGML_OPENCL_EMBED_KERNELS) - set(OPENCL_CL_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl.cl.h") - set(OPENCL_MM_CL_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl_mm.cl.h") - set(OPENCL_CVT_CL_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl_cvt.cl.h") + set(EMBED_KERNEL_SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/kernels/embed_kernel.py") + file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/autogenerated") - set(OPENCL_GEMV_NOSHUFFLE_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl_gemv_noshuffle.cl.h") - set(OPENCL_GEMV_NOSHUFFLE_GENERAL_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl_gemv_noshuffle_general.cl.h") - set(OPENCL_MUL_MAT_Ab_Bi_8x4_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl_mul_mat_Ab_Bi_8x4.cl.h") - set(OPENCL_TRANSPOSE_16_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl_transpose_16.cl.h") - set(OPENCL_TRANSPOSE_32_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl_transpose_32.cl.h") - set(OPENCL_TRANSPOSE_32_16_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl_transpose_32_16.cl.h") - - set(EMBED_KERNEL_SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/kernels/embed_kernel.py") - file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/autogenerated") - - 
include_directories("${CMAKE_BINARY_DIR}/autogenerated") - - # Python must be accessible from command line - add_custom_command( - OUTPUT ${OPENCL_CL_SOURCE_EMBED} - COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT} - ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl.cl - ${OPENCL_CL_SOURCE_EMBED} - DEPENDS kernels/ggml-opencl.cl ${EMBED_KERNEL_SCRIPT} - COMMENT "Generate ggml-opencl.cl.h" - ) - - add_custom_command( - OUTPUT ${OPENCL_MM_CL_SOURCE_EMBED} - COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT} - ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl_mm.cl - ${OPENCL_MM_CL_SOURCE_EMBED} - DEPENDS kernels/ggml-opencl_mm.cl ${EMBED_KERNEL_SCRIPT} - COMMENT "Generate ggml-opencl_mm.cl.h" - ) - - add_custom_command( - OUTPUT ${OPENCL_CVT_CL_SOURCE_EMBED} - COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT} - ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl_cvt.cl - ${OPENCL_CVT_CL_SOURCE_EMBED} - DEPENDS kernels/ggml-opencl_cvt.cl ${EMBED_KERNEL_SCRIPT} - COMMENT "Generate ggml-opencl_cvt.cl.h" - ) - - add_custom_command( - OUTPUT ${OPENCL_GEMV_NOSHUFFLE_SOURCE_EMBED} - COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT} - ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl_gemv_noshuffle.cl - ${OPENCL_GEMV_NOSHUFFLE_SOURCE_EMBED} - DEPENDS kernels/ggml-opencl_gemv_noshuffle.cl ${EMBED_KERNEL_SCRIPT} - COMMENT "Generate ggml-opencl_gemv_noshuffle.cl.h" - ) - - add_custom_command( - OUTPUT ${OPENCL_GEMV_NOSHUFFLE_GENERAL_SOURCE_EMBED} - COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT} - ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl_gemv_noshuffle_general.cl - ${OPENCL_GEMV_NOSHUFFLE_GENERAL_SOURCE_EMBED} - DEPENDS kernels/ggml-opencl_gemv_noshuffle_general.cl ${EMBED_KERNEL_SCRIPT} - COMMENT "Generate ggml-opencl_gemv_noshuffle_general.cl.h" - ) - - add_custom_command( - OUTPUT ${OPENCL_MUL_MAT_Ab_Bi_8x4_SOURCE_EMBED} - COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT} - ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl_mul_mat_Ab_Bi_8x4.cl - ${OPENCL_MUL_MAT_Ab_Bi_8x4_SOURCE_EMBED} - DEPENDS kernels/ggml-opencl_mul_mat_Ab_Bi_8x4.cl ${EMBED_KERNEL_SCRIPT} - COMMENT "Generate ggml-opencl_mul_mat_Ab_Bi_8x4.cl.cl.h" - ) - - add_custom_command( - OUTPUT ${OPENCL_TRANSPOSE_16_SOURCE_EMBED} - COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT} - ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl_transpose_16.cl - ${OPENCL_TRANSPOSE_16_SOURCE_EMBED} - DEPENDS kernels/ggml-opencl_transpose_16.cl ${EMBED_KERNEL_SCRIPT} - COMMENT "Generate ggml-opencl_transpose_16.cl.h" - ) - - add_custom_command( - OUTPUT ${OPENCL_TRANSPOSE_32_SOURCE_EMBED} - COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT} - ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl_transpose_32.cl - ${OPENCL_TRANSPOSE_32_SOURCE_EMBED} - DEPENDS kernels/ggml-opencl_transpose_32.cl ${EMBED_KERNEL_SCRIPT} - COMMENT "Generate ggml-opencl_transpose_32.cl.h" - ) - - add_custom_command( - OUTPUT ${OPENCL_TRANSPOSE_32_16_SOURCE_EMBED} - COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT} - ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl_transpose_32_16.cl - ${OPENCL_TRANSPOSE_32_16_SOURCE_EMBED} - DEPENDS kernels/ggml-opencl_transpose_32_16.cl ${EMBED_KERNEL_SCRIPT} - COMMENT "Generate ggml-opencl_transpose_32_16.cl.h" - ) - - target_sources(${TARGET_NAME} PRIVATE - ${OPENCL_CL_SOURCE_EMBED} - ${OPENCL_MM_CL_SOURCE_EMBED} - ${OPENCL_CVT_CL_SOURCE_EMBED} - ${OPENCL_GEMV_NOSHUFFLE_SOURCE_EMBED} - ${OPENCL_GEMV_NOSHUFFLE_GENERAL_SOURCE_EMBED} - ${OPENCL_MUL_MAT_Ab_Bi_8x4_SOURCE_EMBED} - ${OPENCL_TRANSPOSE_16_SOURCE_EMBED} - 
${OPENCL_TRANSPOSE_32_SOURCE_EMBED} - ${OPENCL_TRANSPOSE_32_16_SOURCE_EMBED}) -else () - # copy ggml-opencl.cl to bin directory - configure_file(kernels/ggml-opencl.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl.cl COPYONLY) - configure_file(kernels/ggml-opencl_mm.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl_mm.cl COPYONLY) - configure_file(kernels/ggml-opencl_cvt.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl_cvt.cl COPYONLY) - - configure_file(kernels/ggml-opencl_gemv_noshuffle.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl_gemv_noshuffle.cl COPYONLY) - configure_file(kernels/ggml-opencl_gemv_noshuffle_general.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl_gemv_noshuffle_general.cl COPYONLY) - configure_file(kernels/ggml-opencl_mul_mat_Ab_Bi_8x4.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl_mul_mat_Ab_Bi_8x4.cl COPYONLY) - configure_file(kernels/ggml-opencl_transpose_16.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl_transpose_16.cl COPYONLY) - configure_file(kernels/ggml-opencl_transpose_32.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl_transpose_32.cl COPYONLY) - configure_file(kernels/ggml-opencl_transpose_32_16.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl_transpose_32_16.cl COPYONLY) + target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/autogenerated") endif () + +function(ggml_opencl_add_kernel KNAME) + set(KERN_HDR ${CMAKE_CURRENT_BINARY_DIR}/autogenerated/${KNAME}.cl.h) + set(KERN_SRC ${CMAKE_CURRENT_SOURCE_DIR}/kernels/${KNAME}.cl) + + if (GGML_OPENCL_EMBED_KERNELS) + message(STATUS "opencl: embedding kernel ${KNAME}") + + # Python must be accessible from command line + add_custom_command( + OUTPUT ${KERN_HDR} + COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT} ${KERN_SRC} ${KERN_HDR} + DEPENDS ${KERN_SRC} ${EMBED_KERNEL_SCRIPT} + COMMENT "Generate ${KERN_HDR}" + ) + + target_sources(${TARGET_NAME} PRIVATE ${KERN_HDR}) + else () + message(STATUS "opencl: adding kernel ${KNAME}") + configure_file(${KERN_SRC} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${KNAME}.cl COPYONLY) + endif () +endfunction() + +set(GGML_OPENCL_KERNELS + ggml-opencl + ggml-opencl_mm + ggml-opencl_cvt + ggml-opencl_gemv_noshuffle + ggml-opencl_gemv_noshuffle_general + ggml-opencl_mul_mat_Ab_Bi_8x4 + ggml-opencl_transpose_16 + ggml-opencl_transpose_32 + ggml-opencl_transpose_32_16 +) + +foreach (K ${GGML_OPENCL_KERNELS}) + ggml_opencl_add_kernel(${K}) +endforeach() From c95fa362b3587d1822558f7e28414521075f254f Mon Sep 17 00:00:00 2001 From: Akarshan Biswas Date: Mon, 24 Mar 2025 23:05:38 +0530 Subject: [PATCH 14/19] ci: [SYCL] ggml-ci Use main GPU and enable sysman (#12547) --- ci/run.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ci/run.sh b/ci/run.sh index edf580353..038190a1b 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -52,7 +52,10 @@ if [ ! 
-z ${GG_BUILD_SYCL} ]; then echo "source /opt/intel/oneapi/setvars.sh" exit 1 fi - + # Use only main GPU + export ONEAPI_DEVICE_SELECTOR="level_zero:0" + # Enable sysman for correct memory reporting + export ZES_ENABLE_SYSMAN=1 CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_SYCL=1 -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON" fi From 2d77d88e70d017cd82c3f1a4517e3102e2028ac4 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 25 Mar 2025 09:19:23 +0200 Subject: [PATCH 15/19] context : fix worst-case reserve outputs (#12545) ggml-ci --- src/llama-context.cpp | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 5bec63e2e..aa363df63 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -294,10 +294,7 @@ llama_context::llama_context( // TODO: something cleaner const auto n_outputs_save = n_outputs; - // max number of outputs - n_outputs = n_tokens; - - LLAMA_LOG_DEBUG("%s: n_tokens = %d, n_seqs = %d, n_outputs = %d\n", __func__, n_tokens, n_seqs, n_outputs); + LLAMA_LOG_DEBUG("%s: worst-case: n_tokens = %d, n_seqs = %d, n_outputs = %d\n", __func__, n_tokens, n_seqs, n_outputs); int n_splits_pp = -1; int n_nodes_pp = -1; @@ -313,8 +310,15 @@ llama_context::llama_context( // reserve pp graph first so that buffers are only allocated once { llama_ubatch ubatch_pp = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; + + // max number of outputs + n_outputs = ubatch_pp.n_tokens; + + LLAMA_LOG_DEBUG("%s: reserving graph for n_tokens = %d, n_seqs = %d\n", __func__, ubatch_pp.n_tokens, ubatch_pp.n_seqs); + auto * gf = graph_init(); graph_build(ctx_compute.get(), gf, ubatch_pp, LLM_GRAPH_TYPE_DEFAULT); + if (!ggml_backend_sched_reserve(sched.get(), gf)) { throw std::runtime_error("failed to allocate compute pp buffers"); } @@ -326,11 +330,18 @@ llama_context::llama_context( // reserve with tg graph to get the number of splits and nodes { llama_ubatch ubatch_tg = { true, 1, 1, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; + + n_outputs = ubatch_tg.n_tokens; + + LLAMA_LOG_DEBUG("%s: reserving graph for n_tokens = %d, n_seqs = %d\n", __func__, ubatch_tg.n_tokens, ubatch_tg.n_seqs); + auto * gf = graph_init(); graph_build(ctx_compute.get(), gf, ubatch_tg, LLM_GRAPH_TYPE_DEFAULT); + if (!ggml_backend_sched_reserve(sched.get(), gf)) { throw std::runtime_error("failed to allocate compute tg buffers"); } + n_splits_tg = ggml_backend_sched_get_n_splits(sched.get()); n_nodes_tg = ggml_graph_n_nodes(gf); } @@ -338,8 +349,14 @@ llama_context::llama_context( // reserve again with pp graph to avoid ggml-alloc reallocations during inference { llama_ubatch ubatch_pp = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; + + n_outputs = ubatch_pp.n_tokens; + + LLAMA_LOG_DEBUG("%s: reserving graph for n_tokens = %d, n_seqs = %d\n", __func__, ubatch_pp.n_tokens, ubatch_pp.n_seqs); + auto * gf = graph_init(); graph_build(ctx_compute.get(), gf, ubatch_pp, LLM_GRAPH_TYPE_DEFAULT); + if (!ggml_backend_sched_reserve(sched.get(), gf)) { throw std::runtime_error("failed to allocate compute pp buffers"); } From 3cd3a395323fa9cdf6ecfa1fea290bf228d4e856 Mon Sep 17 00:00:00 2001 From: R0CKSTAR Date: Tue, 25 Mar 2025 15:45:08 +0800 Subject: [PATCH 16/19] ci: [MUSA] add CI and update doc (#12562) Signed-off-by: Xiaodong Ye --- ci/README.md | 39 +++++++++++++++++++++++++++++++++++++++ ci/run.sh | 11 
++++++++++- 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/ci/README.md b/ci/README.md index 8245c9df6..db4d90668 100644 --- a/ci/README.md +++ b/ci/README.md @@ -26,4 +26,43 @@ GG_BUILD_CUDA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt # with SYCL support source /opt/intel/oneapi/setvars.sh GG_BUILD_SYCL=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt + +# with MUSA support +GG_BUILD_MUSA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt ``` + +## Running MUSA CI in a Docker Container + +Assuming `$PWD` is the root of the `llama.cpp` repository, follow these steps to set up and run MUSA CI in a Docker container: + +### 1. Create a local directory to store cached models, configuration files and venv: + +```bash +mkdir -p $HOME/llama.cpp/ci-cache +``` + +### 2. Create a local directory to store CI run results: + +```bash +mkdir -p $HOME/llama.cpp/ci-results +``` + +### 3. Start a Docker container and run the CI: + +```bash +docker run --privileged -it \ + -v $HOME/llama.cpp/ci-cache:/ci-cache \ + -v $HOME/llama.cpp/ci-results:/ci-results \ + -v $PWD:/ws -w /ws \ + mthreads/musa:rc3.1.1-devel-ubuntu22.04 +``` + +Inside the container, execute the following commands: + +```bash +apt update -y && apt install -y cmake git python3.10-venv wget +git config --global --add safe.directory /ws +GG_BUILD_MUSA=1 bash ./ci/run.sh /ci-results /ci-cache +``` + +This setup ensures that the CI runs within an isolated Docker environment while maintaining cached files and results across runs. diff --git a/ci/run.sh b/ci/run.sh index 038190a1b..efc24391d 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -16,6 +16,9 @@ # # with VULKAN support # GG_BUILD_VULKAN=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt # +# # with MUSA support +# GG_BUILD_MUSA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt +# if [ -z "$2" ]; then echo "usage: $0 " @@ -62,6 +65,12 @@ fi if [ ! -z ${GG_BUILD_VULKAN} ]; then CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_VULKAN=1" fi + +if [ ! -z ${GG_BUILD_MUSA} ]; then + # Use qy1 by default (MTT S80) + MUSA_ARCH=${MUSA_ARCH:-21} + CMAKE_EXTRA="-DGGML_MUSA=ON -DMUSA_ARCHITECTURES=${MUSA_ARCH}" +fi ## helpers # download a file if it does not exist or if it is outdated @@ -811,7 +820,7 @@ export LLAMA_LOG_PREFIX=1 export LLAMA_LOG_TIMESTAMPS=1 if [ -z ${GG_BUILD_LOW_PERF} ]; then - # Create symlink: ./llama.cpp/models-mnt -> $MNT/models/models-mnt + # Create symlink: ./llama.cpp/models-mnt -> $MNT/models rm -rf ${SRC}/models-mnt mnt_models=${MNT}/models mkdir -p ${mnt_models} From 36ee06dd2dcf8d3d1157ebe36366d7670770e75f Mon Sep 17 00:00:00 2001 From: Dan Johansson Date: Tue, 25 Mar 2025 10:35:20 +0100 Subject: [PATCH 17/19] docs : add build instructions for KleidiAI (#12563) Signed-off-by: Dan Johansson --- docs/build.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/docs/build.md b/docs/build.md index 7b5503a1f..fbf12c766 100644 --- a/docs/build.md +++ b/docs/build.md @@ -435,6 +435,26 @@ llama_new_context_with_model: CANN compute buffer size = 1260.81 MiB For detailed info, such as model/device supports, CANN install, please refer to [llama.cpp for CANN](./backend/CANN.md). +## ArmĀ® KleidiAIā„¢ +KleidiAI is a library of optimized microkernels for AI workloads, specifically designed for Arm CPUs. These microkernels enhance performance and can be enabled for use by the CPU backend. 
+ +To enable KleidiAI, go to the llama.cpp directory and build using CMake +```bash +cmake -B build -DGGML_CPU_KLEIDIAI=ON +cmake --build build --config Release +``` +You can verify that KleidiAI is being used by running +```bash +./build/bin/llama-cli -m PATH_TO_MODEL -p "What is a car?" +``` +If KleidiAI is enabled, the ouput will contain a line similar to: +``` +load_tensors: CPU_KLEIDIAI model buffer size = 3474.00 MiB +``` +KleidiAI's microkernels implement optimized tensor operations using Arm CPU features such as dotprod, int8mm and SME. llama.cpp selects the most efficient kernel based on runtime CPU feature detection. However, on platforms that support SME, you must manually enable SME microkernels by setting the environment variable `GGML_KLEIDIAI_SME=1`. + +Depending on your build target, other higher priority backends may be enabled by default. To ensure the CPU backend is used, you must disable the higher priority backends either at compile time, e.g. -DGGML_METAL=OFF, or during run-time using the command line option `--device none`. + ## Android To read documentation for how to build on Android, [click here](./android.md) From e2f560175a195f63c3276972a3d1caec0bd13e05 Mon Sep 17 00:00:00 2001 From: Akarshan Biswas Date: Tue, 25 Mar 2025 16:10:18 +0530 Subject: [PATCH 18/19] SYCL: disable Q4_0 reorder optimization (#12560) ggml-ci --- ggml/src/ggml-sycl/ggml-sycl.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index f4b68333e..9fa24b980 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -191,7 +191,7 @@ static void ggml_check_sycl() try { if (!initialized) { g_ggml_sycl_debug = get_sycl_env("GGML_SYCL_DEBUG", 0); - g_ggml_sycl_disable_optimize= get_sycl_env("GGML_SYCL_DISABLE_OPT", 0); + g_ggml_sycl_disable_optimize= get_sycl_env("GGML_SYCL_DISABLE_OPT", 1); g_ggml_sycl_disable_graph = get_sycl_env("GGML_SYCL_DISABLE_GRAPH", 1); GGML_SYCL_DEBUG("[SYCL] call ggml_check_sycl\n"); GGML_LOG_INFO("Running with Environment Variables:\n"); From 053b3f9aae63151732eccf6b7408c6418ba8746e Mon Sep 17 00:00:00 2001 From: Dan Johansson Date: Tue, 25 Mar 2025 12:10:18 +0100 Subject: [PATCH 19/19] ggml-cpu : update KleidiAI to v1.5.0 (#12568) ggml-cpu : bug fix related to KleidiAI LHS packing Signed-off-by: Dan Johansson --- ggml/src/ggml-cpu/CMakeLists.txt | 4 ++-- ggml/src/ggml-cpu/kleidiai/kernels.cpp | 9 ++------- ggml/src/ggml-cpu/kleidiai/kernels.h | 1 - ggml/src/ggml-cpu/kleidiai/kleidiai.cpp | 7 +++---- 4 files changed, 7 insertions(+), 14 deletions(-) diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index 6aa078a93..cb71e9b39 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -359,9 +359,9 @@ function(ggml_add_cpu_backend_variant_impl tag_name) # Fetch KleidiAI sources: include(FetchContent) - set(KLEIDIAI_COMMIT_TAG "v1.3.0") + set(KLEIDIAI_COMMIT_TAG "v1.5.0") set(KLEIDIAI_DOWNLOAD_URL "https://github.com/ARM-software/kleidiai/archive/refs/tags/${KLEIDIAI_COMMIT_TAG}.tar.gz") - set(KLEIDIAI_ARCHIVE_MD5 "060bd2dc64642b091f461cc8dd7426d9") + set(KLEIDIAI_ARCHIVE_MD5 "ea22e1aefb800e9bc8c74d91633cc58e") if (POLICY CMP0135) cmake_policy(SET CMP0135 NEW) diff --git a/ggml/src/ggml-cpu/kleidiai/kernels.cpp b/ggml/src/ggml-cpu/kleidiai/kernels.cpp index a8a59a887..aacc2bb5e 100644 --- a/ggml/src/ggml-cpu/kleidiai/kernels.cpp +++ b/ggml/src/ggml-cpu/kleidiai/kernels.cpp @@ -51,11 +51,10 @@ static 
ggml_kleidiai_kernels gemm_gemv_kernels[] = { /* .run_kernel = */ kai_run_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot, }, /* .lhs_info = */ { - /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32, - /* .get_packed_offset = */ kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p_f32, + /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qsi8d32p_f32_neon, + /* .get_packed_offset = */ kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p_f32_neon, /* .packed_size = */ kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32p_f32_neon, /* .pack_func = */ kai_run_lhs_quant_pack_qsi8d32p_f32_neon, - /* .require_aligned_m_idx = */ true, }, /* .rhs_info = */ { /* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon, @@ -100,7 +99,6 @@ static ggml_kleidiai_kernels gemm_gemv_kernels[] = { /* .get_packed_offset = */ kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p_f32, /* .packed_size = */ kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32p_f32, /* .pack_func = */ kai_run_lhs_quant_pack_qsi8d32p_f32, - /* .require_aligned_m_idx = */ false, }, /* .rhs_info = */ { /* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0, @@ -144,7 +142,6 @@ static ggml_kleidiai_kernels gemm_gemv_kernels[] = { /* .get_packed_offset = */ kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p_f32, /* .packed_size = */ kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32p_f32, /* .pack_func = */ kai_run_lhs_quant_pack_qsi8d32p_f32, - /* .require_aligned_m_idx = */ false, }, /* .rhs_info = */ { /* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0, @@ -189,7 +186,6 @@ static ggml_kleidiai_kernels gemm_gemv_kernels[] = { /* .get_packed_offset = */ kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p_f32, /* .packed_size = */ kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32p_f32, /* .pack_func = */ kai_run_lhs_quant_pack_qsi8d32p_f32, - /* .require_aligned_m_idx = */ false, }, /* .rhs_info = */ { /* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0, @@ -233,7 +229,6 @@ static ggml_kleidiai_kernels gemm_gemv_kernels[] = { /* .get_packed_offset = */ kai_get_lhs_packed_offset_lhs_quant_pack_qsi8d32p_f32, /* .packed_size = */ kai_get_lhs_packed_size_lhs_quant_pack_qsi8d32p_f32, /* .pack_func = */ kai_run_lhs_quant_pack_qsi8d32p_f32, - /* .require_aligned_m_idx = */ false, }, /* .rhs_info = */ { /* .packed_size = */ kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0, diff --git a/ggml/src/ggml-cpu/kleidiai/kernels.h b/ggml/src/ggml-cpu/kleidiai/kernels.h index a0b0d1493..2ffe97eb4 100644 --- a/ggml/src/ggml-cpu/kleidiai/kernels.h +++ b/ggml/src/ggml-cpu/kleidiai/kernels.h @@ -40,7 +40,6 @@ struct lhs_packing_info { size_t (*packed_size)(size_t m, size_t k, size_t bl, size_t mr, size_t kr, size_t sr); void (*pack_func)(size_t m, size_t k, size_t bl, size_t mr, size_t kr, size_t sr, size_t m_idx_start, const float* lhs, size_t lhs_stride, void* lhs_packed); - bool require_aligned_m_idx; }; struct rhs_packing_info { diff --git a/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp b/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp index 4dff5c67e..4e89ca0fa 100644 --- a/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +++ b/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp @@ -124,8 +124,7 @@ class tensor_traits : public ggml::cpu::tensor_traits { size_t sr = kernel->get_sr(); // Calculate number of columns to be processed per thread - const bool use_multithread = lhs_info->require_aligned_m_idx && m <= mr ? 
false : true;
-        const size_t num_m_per_thread = use_multithread ? kai_roundup(m, nth) / nth : m;
+        const size_t num_m_per_thread = kai_roundup(m, mr * nth) / nth;
         const size_t m_start = ith * num_m_per_thread;
         size_t m_to_process = num_m_per_thread;
         if ((m_start + m_to_process) > m) {
@@ -135,11 +134,11 @@ class tensor_traits : public ggml::cpu::tensor_traits {

         if(m_start < m) {
             // Transform LHS
             const size_t src_stride = src1->nb[1];
-            const float * src_ptr = reinterpret_cast<const float *>(lhs + lhs_info->get_offset(0, dst->src[1]->nb[1]));
+            const float * src_ptr = reinterpret_cast<const float *>(lhs + lhs_info->get_offset(m_start, dst->src[1]->nb[1]));
             const size_t lhs_packed_offset = lhs_info->get_packed_offset(m_start, k, QK4_0, mr, kr, sr);
             void * lhs_packed_ptr = static_cast<void *>(lhs_packed + lhs_packed_offset);

-            lhs_info->pack_func(m_to_process, k, QK4_0, mr, kr, sr, m_start, src_ptr, src_stride, lhs_packed_ptr);
+            lhs_info->pack_func(m_to_process, k, QK4_0, mr, kr, sr, 0, src_ptr, src_stride, lhs_packed_ptr);
         }

         ggml_barrier(params->threadpool);
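
For readers following the LHS-packing fix in the last hunk: the key change is that each thread's slice of rows is now rounded up to a multiple of `mr * nth`, so every thread's `m_start` stays aligned to the packing block height `mr`. The sketch below is a minimal, self-contained illustration of that partitioning arithmetic, not the actual ggml/KleidiAI code; `roundup` is a hypothetical stand-in for `kai_roundup`, and the sizes `m`, `mr`, and `nth` are made-up example values.

```cpp
// Minimal sketch of the per-thread row partitioning used by the fix above.
// Assumption: roundup() stands in for kai_roundup(); m/mr/nth are example sizes.
#include <stddef.h>
#include <stdio.h>

// Round v up to a multiple of step.
static size_t roundup(size_t v, size_t step) {
    return ((v + step - 1) / step) * step;
}

int main() {
    const size_t m   = 37; // rows to pack (example)
    const size_t mr  = 4;  // LHS packing block height (example)
    const size_t nth = 3;  // number of threads (example)

    // Rounding to a multiple of mr * nth keeps every thread's starting row
    // aligned to mr, so no thread begins in the middle of a packing block.
    const size_t num_m_per_thread = roundup(m, mr * nth) / nth;

    for (size_t ith = 0; ith < nth; ++ith) {
        const size_t m_start = ith * num_m_per_thread;
        if (m_start >= m) {
            continue; // this thread has nothing to pack
        }
        size_t m_to_process = num_m_per_thread;
        if (m_start + m_to_process > m) {
            m_to_process = m - m_start; // last thread takes the remainder
        }
        printf("thread %zu packs rows [%zu, %zu), m_start %% mr = %zu\n",
               ith, m_start, m_start + m_to_process, m_start % mr);
    }
    return 0;
}
```

With the previous `kai_roundup(m, nth) / nth` split, a thread could start mid-block (for example, row 13 when `mr` is 4), which appears to be why the unaligned case needed special handling; aligning the split lets the packed offset be computed uniformly and the pack function always start at index 0 of its slice.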