Merge branch 'upstream' into concedo_experimental

# Conflicts:
#	ggml/src/ggml-cann/aclnn_ops.cpp
#	ggml/src/ggml-cann/aclnn_ops.h
#	ggml/src/ggml-cann/common.h
#	ggml/src/ggml-cann/ggml-cann.cpp
#	tests/test-backend-ops.cpp
This commit is contained in:
Concedo 2025-04-18 12:44:20 +08:00
commit bce519cee7
12 changed files with 130 additions and 34 deletions

View file

@@ -10153,7 +10153,7 @@ struct llm_build_deepseek2 : public llm_graph_context {
// See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation.
const float mscale = attn_factor * (1.0f + hparams.rope_yarn_log_mul * logf(1.0f / freq_scale));
const float kq_scale = 1.0f*mscale*mscale/sqrtf(float(n_embd_head_k));
-    const float attn_factor_scaled = 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale));
+    const float attn_factor = 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale));
ggml_tensor * cur;
ggml_tensor * inpL;
@@ -10230,13 +10230,13 @@ struct llm_build_deepseek2 : public llm_graph_context {
q_pe = ggml_rope_ext(ctx0, q_pe, inp_pos, nullptr,
n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
-                ext_factor, attn_factor_scaled, beta_fast, beta_slow
+                ext_factor, attn_factor, beta_fast, beta_slow
);
cb(q_pe, "q_pe", il);
k_pe = ggml_rope_ext(ctx0, k_pe, inp_pos, nullptr,
n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
-                ext_factor, attn_factor_scaled, beta_fast, beta_slow
+                ext_factor, attn_factor, beta_fast, beta_slow
);
cb(k_pe, "k_pe", il);