#include "models.h"
|
|
|
|
void llama_model_deepseek2::load_arch_hparams(llama_model_loader & ml) {
|
|
const auto n_vocab = vocab.n_tokens();
|
|
|
|
// lite variants include DeepSeek-V2-Lite, GigaChat3-10B-A1.8B, Kanana-2-30B-A3B
|
|
const bool is_lite = (hparams.n_layer == 27 || hparams.n_layer == 26 || (hparams.n_layer == 48 && n_vocab == 128256));
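    // (there is no dedicated GGUF key marking the lite variants, so they are identified
    //  by layer count, with n_vocab used to disambiguate models that share a layer count)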

    ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
    ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead, false);
    if (!is_lite) {
        ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
    }
    ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
    ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH_MLA, hparams.n_embd_head_k_mla_impl, false);
    ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH_MLA, hparams.n_embd_head_v_mla_impl, false);
    ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
    ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
    ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale, false);
    ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM, hparams.expert_weights_norm, false);
    ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, false);
    if (hparams.expert_gating_func == LLAMA_EXPERT_GATING_FUNC_TYPE_NONE) {
        // for compatibility with existing DeepSeek V2 and V2.5 GGUFs
        // that have no expert_gating_func model parameter set
        if ((hparams.n_layer == 47 || hparams.n_layer == 48) && n_vocab == 154880) {
            // GLM 4.7 Lite
            hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID;
        } else {
            hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX;
        }
    }

    if (ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul, false)) {
        // [TAG_DEEPSEEK2_YARN_LOG_MUL_FIX]
        // cancel the factor from the convert script
        hparams.rope_yarn_log_mul /= 0.1f;
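        // note: the convert script presumably writes this key pre-multiplied by 0.1
        //  (i.e. 0.1 * mscale_all_dim from the original model config); dividing by 0.1f
        //  recovers the raw value, and the 0.1 factor is re-applied in the mscale
        //  computation in the graph constructor below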
    }

    // (optional) temperature tuning - used by mistral-large
    ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_SCALE, hparams.f_attn_temp_scale, false);
    ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_LENGTH, hparams.n_attn_temp_floor_scale, false); // FIXME why not use temperature_length?

    hparams.f_attn_temp_offset = 0.0f;

    switch (hparams.n_layer) {
        case 27: type = LLM_TYPE_16B;     break;
        case 47: type = LLM_TYPE_30B_A3B; break;
        case 60: type = LLM_TYPE_236B;    break;
        case 61: type = LLM_TYPE_671B;    break;
        default: type = LLM_TYPE_UNKNOWN;
    }
}

void llama_model_deepseek2::load_arch_tensors(llama_model_loader &) {
    LLAMA_LOAD_LOCALS;
    const int64_t n_expert_shared = hparams.n_expert_shared;

    const bool is_mla = hparams.is_mla();

    // note: these are the actual head sizes you get when treating as MHA or after "decompression" using wv_b for MLA
    const int64_t n_embd_head_k_mla = hparams.n_embd_head_k_mla();
    const int64_t n_embd_head_v_mla = hparams.n_embd_head_v_mla();

    const int64_t n_embd_head_qk_rope = hparams.n_rot();
    const int64_t n_embd_head_qk_nope = n_embd_head_k_mla - n_embd_head_qk_rope;
    GGML_ASSERT(n_embd_head_qk_nope >= 1);
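    // i.e. each (MHA-style) K head is the concatenation of a non-RoPE'd part of size
    //  n_embd_head_qk_nope and a RoPE'd part of size n_embd_head_qk_rope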

    const int64_t q_lora_rank  = hparams.n_lora_q;
    const int64_t kv_lora_rank = hparams.n_lora_kv;

    const int64_t n_ff_exp = hparams.n_ff_exp;

    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

    // output
    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
    // try to load output.weight, if not found, use token_embd (tied embeddings)
    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
    if (!output) {
        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
    }

    for (int i = 0; i < n_layer; ++i) {
        auto & layer = layers[i];

        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
        if (q_lora_rank > 0) {
            layer.attn_q_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_A_NORM, "weight", i), {q_lora_rank}, 0);
        }

        layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0);

        if (q_lora_rank > 0) {
            layer.wq_a = create_tensor(tn(LLM_TENSOR_ATTN_Q_A, "weight", i), {n_embd, q_lora_rank}, 0);
            layer.wq_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_B, "weight", i), {q_lora_rank, n_head * n_embd_head_k_mla}, 0);
        } else {
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_head * n_embd_head_k_mla}, 0);
        }

        layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + n_embd_head_qk_rope}, 0);

        // note: only old legacy GGUF files will have the unsplit wkv_b tensor in them
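        //  (newer conversions presumably store wk_b/wv_b pre-split so the MLA path can
        //  attend directly against the compressed kv_lora_rank cache and only
        //  "decompress" with wv_b on the output side)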
        if (is_mla) {
            layer.wk_b = create_tensor(tn(LLM_TENSOR_ATTN_K_B, "weight", i), {n_embd_head_qk_nope, kv_lora_rank, n_head}, 0);
            layer.wv_b = create_tensor(tn(LLM_TENSOR_ATTN_V_B, "weight", i), {kv_lora_rank, n_embd_head_v_mla, n_head}, 0);
        } else {
            layer.wkv_b = create_tensor(tn(LLM_TENSOR_ATTN_KV_B, "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v_mla)}, 0);
        }

        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head * n_embd_head_v_mla, n_embd}, 0);

        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);

        if (i < (int) hparams.n_layer_dense_lead) {
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
        } else {
            layer.ffn_gate_inp    = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,    "weight", i), {n_embd, n_expert}, 0);
            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias",   i), {n_expert}, TENSOR_NOT_REQUIRED);

            if (n_expert == 0) {
                throw std::runtime_error("n_expert must be > 0");
            }
            if (n_expert_used == 0) {
                throw std::runtime_error("n_expert_used must be > 0");
            }

            // MoE branch
            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
            create_tensor_gate_up_exps(layer, i, n_embd, n_ff_exp, n_expert, 0);
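            // (create_tensor_gate_up_exps presumably creates the per-expert gate/up projections,
            //  either as separate ffn_gate_exps/ffn_up_exps tensors or as the fused
            //  ffn_gate_up_exps that build_moe_ffn is handed in the graph code)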

            // Shared expert branch
            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff_exp * n_expert_shared, n_embd}, 0);
            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
        }
    }
}

std::unique_ptr<llm_graph_context> llama_model_deepseek2::build_arch_graph(const llm_graph_params & params) const {
    return std::make_unique<graph>(*this, params);
}

llama_model_deepseek2::graph::graph(const llama_model & model, const llm_graph_params & params) :
    llm_graph_context(params) {
    const bool is_ocr = model.arch == LLM_ARCH_DEEPSEEK2OCR;

    const bool is_mla = hparams.is_mla();

    // note: these are the actual head sizes you get when treating as MHA or after "decompression" using wv_b for MLA
    const int64_t n_embd_head_k = hparams.n_embd_head_k_mla();
    const int64_t n_embd_head_v = hparams.n_embd_head_v_mla();

    const int64_t n_embd_head_qk_rope = hparams.n_rot();
    const int64_t n_embd_head_qk_nope = n_embd_head_k - n_embd_head_qk_rope;

    const uint32_t kv_lora_rank = hparams.n_lora_kv;

    // We have to pre-scale kq_scale and attn_factor to make the YaRN RoPE work correctly.
    // See https://github.com/ggml-org/llama.cpp/discussions/7416 for detailed explanation.
    // And also: https://github.com/ggml-org/llama.cpp/pull/17945 [TAG_DEEPSEEK2_YARN_LOG_MUL_FIX]

    // first cancel the adjustment from llama_hparams::yarn_attn_factor_adjust to get the original attn_factor
    GGML_ASSERT(ext_factor >= 0.0f);
    const float attn_factor_org = attn_factor * (1.0f + 0.1f * logf(1.0f / freq_scale));

    // use the original attn_factor to pre-scale the kq_scale
    const float mscale   = attn_factor_org * (1.0f + 0.1f * hparams.rope_yarn_log_mul * logf(1.0f / freq_scale));
    const float kq_scale = 1.0f * mscale * mscale / sqrtf(float(n_embd_head_k));
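    // in short (assuming the standard YaRN attention-factor adjustment), with s = 1/freq_scale:
    //   attn_factor_org = attn_factor * (1 + 0.1*ln(s))
    //   mscale          = attn_factor_org * (1 + 0.1*rope_yarn_log_mul*ln(s))
    //   kq_scale        = mscale^2 / sqrt(n_embd_head_k)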

    ggml_tensor * cur;
    ggml_tensor * inpL;

    // {n_embd, n_tokens}
    inpL = build_inp_embd(model.tok_embd);

    // (optional) temperature tuning - used by mistral-large
    ggml_tensor * inp_attn_scale = nullptr;
    if (hparams.f_attn_temp_scale != 0.0f) {
        inp_attn_scale = build_inp_attn_scale();
    }

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

    auto * inp_attn_kv = !is_mla ? build_attn_inp_kv() : nullptr;
    auto * inp_attn_k  =  is_mla ? build_attn_inp_k()  : nullptr;

    ggml_tensor * inp_out_ids = build_inp_out_ids();

    const int effective_n_layers = hparams.n_layer - hparams.nextn_predict_layers;
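    // note: nextn_predict_layers presumably counts the trailing MTP (multi-token prediction)
    //  layers, which are not part of the main decode graph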
    for (int il = 0; il < effective_n_layers; ++il) {
        ggml_tensor * inpSA = inpL;

        // norm
        cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self_attention
        if (is_ocr) {
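            // the DEEPSEEK2OCR arch uses plain multi-head attention with NEOX-style RoPE
            // over full heads instead of the MLA path below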
            const int n_embed_head  = hparams.n_embd / hparams.n_head();
            const int ocr_rope_type = GGML_ROPE_TYPE_NEOX;
            GGML_ASSERT(n_embed_head == n_embd_head_k && n_embed_head == n_embd_head_v);

            ggml_tensor * Qcur = NULL;
            ggml_tensor * Kcur = NULL;
            ggml_tensor * Vcur = NULL;

            Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
            Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
            Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
            cb(Qcur, "q", il);
            cb(Kcur, "k", il);
            cb(Vcur, "v", il);

            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embed_head, n_head, n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embed_head, n_head, n_tokens);
            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embed_head, n_head, n_tokens);

            GGML_ASSERT(fabs(freq_base - 10000.0) < 1e-4);
            Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr, n_embed_head, ocr_rope_type, 0, freq_base, 1, 0, 1, 0, 0);
            Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr, n_embed_head, ocr_rope_type, 0, freq_base, 1, 0, 1, 0, 0);
            cb(Qcur, "q_pe", il);
            cb(Kcur, "k_pe", il);

            cur = build_attn(inp_attn_kv,
                    model.layers[il].wo, NULL, model.layers[il].wo_s,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
            cb(cur, "attn_out", il);
        } else {
            ggml_tensor * q = NULL;

            // lite variants include DeepSeek-V2-Lite, GigaChat3-10B-A1.8B
            const bool is_lite = model.layers[il].wq != nullptr;

            if (!is_lite) {
                q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur);
                cb(q, "q", il);

                q = build_norm(q, model.layers[il].attn_q_a_norm, nullptr, LLM_NORM_RMS, il);
                cb(q, "q", il);

                q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q);
                cb(q, "q", il);
            } else {
                q = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(q, "q", il);
            }
            // split into {n_embd_head_qk_nope, n_head, n_tokens}
            ggml_tensor * q_nope =
                ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens, ggml_row_size(q->type, n_embd_head_k),
                             ggml_row_size(q->type, n_embd_head_k) * n_head, 0);
            cb(q_nope, "q_nope", il);

            // and {n_embd_head_qk_rope, n_head, n_tokens}
            ggml_tensor * q_pe = ggml_view_3d(
                ctx0, q, n_embd_head_qk_rope, n_head, n_tokens, ggml_row_size(q->type, n_embd_head_k),
                ggml_row_size(q->type, n_embd_head_k) * n_head, ggml_row_size(q->type, n_embd_head_qk_nope));
            cb(q_pe, "q_pe", il);

            ggml_tensor * kv_cmpr_pe = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);
            cb(kv_cmpr_pe, "kv_cmpr_pe", il);

            // split into {kv_lora_rank, n_tokens}
            ggml_tensor * kv_cmpr =
                ggml_view_2d(ctx0, kv_cmpr_pe, kv_lora_rank, n_tokens,
                             ggml_row_size(kv_cmpr_pe->type, kv_lora_rank + n_embd_head_qk_rope), 0);
            cb(kv_cmpr, "kv_cmpr", il);

            // and {n_embd_head_qk_rope, 1, n_tokens}
            ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_cmpr_pe, n_embd_head_qk_rope, 1, n_tokens,
                                              ggml_row_size(kv_cmpr_pe->type, kv_lora_rank + n_embd_head_qk_rope),
                                              ggml_row_size(kv_cmpr_pe->type, kv_lora_rank + n_embd_head_qk_rope),
                                              ggml_row_size(kv_cmpr_pe->type, kv_lora_rank));
            cb(k_pe, "k_pe", il);

            q_pe = ggml_rope_ext(ctx0, q_pe, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                                 ext_factor, attn_factor, beta_fast, beta_slow);
            cb(q_pe, "q_pe", il);

            k_pe = ggml_rope_ext(ctx0, k_pe, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                                 ext_factor, attn_factor, beta_fast, beta_slow);
            cb(k_pe, "k_pe", il);

            kv_cmpr = build_norm(kv_cmpr, model.layers[il].attn_kv_a_norm, nullptr, LLM_NORM_RMS, il);
            cb(kv_cmpr, "kv_cmpr", il);

            if (is_mla) {
                // {n_embd_head_qk_nope, n_tokens, n_head}
                q_nope = ggml_permute(ctx0, q_nope, 0, 2, 1, 3);
                cb(q_nope, "q_nope_perm", il);

                // {n_embd_head_qk_nope, kv_lora_rank, n_head} x {n_embd_head_qk_nope, n_tokens, n_head}
                ggml_tensor * q_nope_absorbed = ggml_mul_mat(ctx0, model.layers[il].wk_b, q_nope);
                cb(q_nope_absorbed, "q_nope_absorbed", il);

                // {kv_lora_rank, n_head, n_tokens}
                q_nope_absorbed = ggml_permute(ctx0, q_nope_absorbed, 0, 2, 1, 3);
                cb(q_nope_absorbed, "q_nope_absorbed_perm", il);
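                // (the wk_b matmul above "absorbs" the K decompression into Q: q_nope is
                //  projected into the kv_lora_rank space, so attention scores can be taken
                //  directly against the compressed KV entries without materializing full K heads)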

                // {n_embd_head_qk_rope + kv_lora_rank, n_head, n_tokens}
                // note: rope must go first for in-place context shifting in build_rope_shift()
                ggml_tensor * Qcur = ggml_concat(ctx0, q_nope_absorbed, q_pe, 0);
                cb(Qcur, "Qcur", il);

                kv_cmpr = ggml_reshape_3d(ctx0, kv_cmpr, kv_lora_rank, 1, n_tokens);
                cb(kv_cmpr, "kv_cmpr_reshape", il);

                // {n_embd_head_qk_rope + kv_lora_rank, 1, n_tokens}
                ggml_tensor * Kcur = ggml_concat(ctx0, kv_cmpr, k_pe, 0);
                cb(Kcur, "Kcur", il);

                // {kv_lora_rank, 1, n_tokens}
                ggml_tensor * Vcur = kv_cmpr;
                cb(Vcur, "Vcur", il);

                if (inp_attn_scale) {
                    // apply llama 4 temperature scaling
                    Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
                    cb(Qcur, "Qcur_attn_temp_scaled", il);
                }

                // note: MLA with the absorption optimization converts into MQA (ie: GQA with 1 group)
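                // (Kcur/Vcur have a single head here; wv_b is passed to build_attn so the
                //  attention output in kv_lora_rank space is presumably "decompressed" back to
                //  n_embd_head_v per head before the wo projection)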
                cur = build_attn(inp_attn_k,
                        model.layers[il].wo, NULL, model.layers[il].wo_s,
                        Qcur, Kcur, Vcur, nullptr, nullptr, model.layers[il].wv_b, kq_scale, il);
            } else {
                ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_cmpr);
                cb(kv, "kv", il);

                // split into {n_embd_head_qk_nope, n_head, n_tokens}
                ggml_tensor * k_nope =
                    ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,
                                 ggml_row_size(kv->type, n_embd_head_qk_nope + n_embd_head_v),
                                 ggml_row_size(kv->type, n_embd_head_qk_nope + n_embd_head_v) * n_head, 0);
                cb(k_nope, "k_nope_view", il);

                // and {n_embd_head_v, n_head, n_tokens}
                ggml_tensor * Vcur = ggml_view_3d(ctx0, kv, n_embd_head_v, n_head, n_tokens,
                                                  ggml_row_size(kv->type, n_embd_head_qk_nope + n_embd_head_v),
                                                  ggml_row_size(kv->type, n_embd_head_qk_nope + n_embd_head_v) * n_head,
                                                  ggml_row_size(kv->type, n_embd_head_qk_nope));
                cb(Vcur, "Vcur_view", il);

                Vcur = ggml_cont(ctx0, Vcur);
                cb(Vcur, "Vcur_cont", il);

                ggml_tensor * Qcur = ggml_concat(ctx0, q_nope, q_pe, 0);
                cb(Qcur, "Qcur", il);

                ggml_tensor * Kcur = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0);
                cb(Kcur, "Kcur", il);
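                // (k_pe has a single head; the ggml_repeat above broadcasts the shared RoPE
                //  part across all n_head heads so Kcur matches the per-head layout of Qcur)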

                if (inp_attn_scale) {
                    // apply llama 4 temperature scaling
                    Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
                    cb(Qcur, "Qcur_attn_temp_scaled", il);
                }

                // note: MLA without the absorption optimization converts into MHA (ie: GQA with full n_head groups)
                cur = build_attn(inp_attn_kv,
                        model.layers[il].wo, NULL, model.layers[il].wo_s,
                        Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
            }
        }
        if (il == effective_n_layers - 1 && inp_out_ids) {
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }
        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

        if ((uint32_t) il < hparams.n_layer_dense_lead) {
            cur = build_ffn(cur,
                    model.layers[il].ffn_up,   NULL, NULL,
                    model.layers[il].ffn_gate, NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL,
                    NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
        } else {
            // MoE branch
            ggml_tensor * moe_out = build_moe_ffn(cur,
                    model.layers[il].ffn_gate_inp,
                    model.layers[il].ffn_up_exps,
                    model.layers[il].ffn_gate_exps,
                    model.layers[il].ffn_down_exps,
                    model.layers[il].ffn_exp_probs_b,
                    n_expert, n_expert_used,
                    LLM_FFN_SILU, hparams.expert_weights_norm,
                    hparams.expert_weights_scale,
                    (llama_expert_gating_func_type) hparams.expert_gating_func,
                    il,
                    nullptr,
                    model.layers[il].ffn_gate_up_exps);
            cb(moe_out, "ffn_moe_out", il);

            // FFN shared expert
            {
                ggml_tensor * ffn_shexp =
                    build_ffn(cur,
                        model.layers[il].ffn_up_shexp,   NULL, NULL,
                        model.layers[il].ffn_gate_shexp, NULL, NULL,
                        model.layers[il].ffn_down_shexp, NULL, NULL,
                        NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
                cb(ffn_shexp, "ffn_shexp", il);

                cur = ggml_add(ctx0, moe_out, ffn_shexp);
                cb(cur, "ffn_out", il);
            }
        }
        cur = ggml_add(ctx0, cur, ffn_inp);

        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }
    cur = inpL;

    cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head
    cur = ggml_mul_mat(ctx0, model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}