Merge commit '456af35eb7' into concedo_experimental

# Conflicts:
#	ggml/src/ggml-sycl/getrows.cpp
#	src/CMakeLists.txt
#	tools/llama-bench/llama-bench.cpp
Commit: b59b5dbbd1
Author: Concedo
Date:   2025-06-20 23:41:27 +08:00
28 changed files with 1403 additions and 496 deletions

src/llama-model.cpp

@@ -8,7 +8,8 @@
#include "llama-kv-cache-unified.h"
#include "llama-kv-cache-unified-iswa.h"
#include "llama-kv-cache-recurrent.h"
#include "llama-memory-hybrid.h"
#include "llama-memory-recurrent.h"
#include "ggml-cpp.h"
@@ -475,6 +476,10 @@ void llama_model::load_hparams(llama_model_loader & ml) {
std::fill(hparams.n_head_arr.begin(), hparams.n_head_arr.end(), 0);
std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0);
std::fill(hparams.n_ff_arr.begin(), hparams.n_ff_arr.end(), 0);
std::fill(
hparams.recurrent_layer_arr.begin(),
hparams.recurrent_layer_arr.end(),
llm_arch_is_recurrent(ml.get_arch()));
std::fill(hparams.rope_sections.begin(), hparams.rope_sections.end(), 0);
@@ -9211,7 +9216,7 @@ struct llm_build_mamba : public llm_graph_context {
// {n_embd, n_tokens}
inpL = build_inp_embd(model.tok_embd);
ggml_tensor * state_copy = build_inp_s_copy();
auto * rs_inp = build_rs_inp();
for (int il = 0; il < n_layer; ++il) {
// norm
@@ -9220,7 +9225,7 @@ struct llm_build_mamba : public llm_graph_context {
LLM_NORM_RMS, il);
cb(cur, "attn_norm", il);
cur = build_mamba_layer(gf, cur, state_copy, ubatch, il);
cur = build_mamba_layer(rs_inp, gf, cur, ubatch, il);
if (il == n_layer - 1) {
// skip computing output for unused tokens
@@ -9258,12 +9263,12 @@ struct llm_build_mamba : public llm_graph_context {
// TODO: split
ggml_tensor * build_mamba_layer(
ggml_cgraph * gf,
ggml_tensor * cur,
ggml_tensor * state_copy,
const llama_ubatch & ubatch,
int il) const {
const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
llm_graph_input_rs * inp,
ggml_cgraph * gf,
ggml_tensor * cur,
const llama_ubatch & ubatch,
int il) const {
const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
const auto kv_head = kv_state->get_head();
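Note (not part of the diff): a minimal stand-alone sketch of the calling-convention change made in this hunk — the per-layer state_copy tensor argument is replaced by a single recurrent-state input object built once per graph and passed into every layer builder. The types below (rs_input, graph, tensor) are simplified stand-ins, not the real llm_graph_input_rs / ggml_cgraph / ggml_tensor; only the parameter shapes mirror the diff.

// Simplified stand-ins for the llama.cpp types used in this hunk.
struct rs_input {};   // plays the role of llm_graph_input_rs (built once via build_rs_inp())
struct graph    {};   // plays the role of ggml_cgraph
struct tensor   {};   // plays the role of ggml_tensor

// Old convention: every layer builder received the raw state_copy tensor.
tensor * build_layer_old(graph *, tensor * cur, tensor * /*state_copy*/, int /*il*/) { return cur; }

// New convention: the shared recurrent-state input object is the first argument.
tensor * build_layer_new(rs_input *, graph *, tensor * cur, int /*il*/) { return cur; }

tensor * build_all_layers(graph * gf, tensor * inpL, int n_layer) {
    rs_input rs_inp;   // corresponds to `auto * rs_inp = build_rs_inp();`
    for (int il = 0; il < n_layer; ++il) {
        inpL = build_layer_new(&rs_inp, gf, inpL, il);
    }
    return inpL;
}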
@@ -9283,17 +9288,17 @@ struct llm_build_mamba : public llm_graph_context {
GGML_ASSERT(ubatch.equal_seqs);
GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs);
ggml_tensor * conv_states_all = kv_state->get_k_l(il);
ggml_tensor * ssm_states_all = kv_state->get_v_l(il);
ggml_tensor * conv_states_all = kv_state->get_r_l(il);
ggml_tensor * ssm_states_all = kv_state->get_s_l(il);
// (ab)using the KV cache to store the states
ggml_tensor * conv = build_recurrent_state(
gf, conv_states_all, state_copy,
hparams.n_embd_k_s(), n_seqs);
ggml_tensor * conv = build_rs(
inp, gf, conv_states_all,
hparams.n_embd_r(), n_seqs);
conv = ggml_reshape_3d(ctx0, conv, d_conv - 1, d_inner, n_seqs);
ggml_tensor * ssm = build_recurrent_state(
gf, ssm_states_all, state_copy,
hparams.n_embd_v_s(), n_seqs);
ggml_tensor * ssm = build_rs(
inp, gf, ssm_states_all,
hparams.n_embd_s(), n_seqs);
ssm = ggml_reshape_3d(ctx0, ssm, d_state, d_inner, n_seqs);
// {n_embd, n_tokens} => {n_embd, n_seq_tokens, n_seqs}
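Aside (not part of the diff): the per-sequence state sizes requested from build_rs above can be read off the two ggml_reshape_3d calls. The self-contained sketch below spells out that relationship; the formulas are inferred from the reshape dimensions in this hunk, so treat them as an assumption rather than the canonical definitions of n_embd_r()/n_embd_s().

#include <cassert>
#include <cstdint>

// Illustrative Mamba dimensions (names follow the variables used in this hunk).
struct mamba_dims {
    int64_t d_conv;   // convolution window
    int64_t d_inner;  // expanded inner embedding size
    int64_t d_state;  // SSM state size per channel
};

// Inferred from `ggml_reshape_3d(ctx0, conv, d_conv - 1, d_inner, n_seqs)`:
// the rolling convolution state keeps (d_conv - 1) past columns per inner channel.
int64_t n_embd_r(const mamba_dims & d) { return (d.d_conv - 1) * d.d_inner; }

// Inferred from `ggml_reshape_3d(ctx0, ssm, d_state, d_inner, n_seqs)`:
// the SSM state keeps d_state values per inner channel.
int64_t n_embd_s(const mamba_dims & d) { return d.d_state * d.d_inner; }

int main() {
    const mamba_dims d = {4, 2048, 16};   // hypothetical sizes, not taken from any real model
    assert(n_embd_r(d) == 3 * 2048);      // conv state per sequence
    assert(n_embd_s(d) == 16 * 2048);     // ssm  state per sequence
    return 0;
}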
@@ -12004,13 +12009,13 @@ struct llm_build_rwkv6_base : public llm_graph_context {
}
ggml_tensor * build_rwkv6_time_mix(
llm_graph_input_rs * inp,
ggml_cgraph * gf,
ggml_tensor * cur,
ggml_tensor * x_prev,
ggml_tensor * state_copy,
const llama_ubatch & ubatch,
int il) const {
const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
const auto n_tokens = ubatch.n_tokens;
const auto n_seqs = ubatch.n_seqs;
@@ -12131,9 +12136,9 @@ struct llm_build_rwkv6_base : public llm_graph_context {
k = ggml_sub(ctx0, k, ggml_mul(ctx0, k, w));
}
ggml_tensor * wkv_state = build_recurrent_state(
gf, kv_state->get_v_l(il), state_copy,
hparams.n_embd_v_s(), n_seqs);
ggml_tensor * wkv_state = build_rs(
inp, gf, kv_state->get_s_l(il),
hparams.n_embd_s(), n_seqs);
ggml_tensor * wkv_output;
if (is_qrwkv) {
@@ -12151,9 +12156,9 @@ struct llm_build_rwkv6_base : public llm_graph_context {
wkv_state,
ggml_view_1d(
ctx0,
kv_state->get_v_l(il),
hparams.n_embd_v_s() * n_seqs,
hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_state->get_v_l(il))
kv_state->get_s_l(il),
hparams.n_embd_s() * n_seqs,
hparams.n_embd_s() * kv_head * ggml_element_size(kv_state->get_s_l(il))
)
)
);
@@ -12187,7 +12192,7 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base {
inpL = build_inp_embd(model.tok_embd);
inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1);
ggml_tensor * state_copy = build_inp_s_copy();
auto * rs_inp = build_rs_inp();
const auto n_embd = hparams.n_embd;
const auto n_seq_tokens = ubatch.n_seq_tokens;
@@ -12197,9 +12202,7 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base {
const llama_layer * layer = &model.layers[il];
inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
ggml_tensor * token_shift = build_rwkv_token_shift_load(
gf, state_copy, ubatch, il
);
ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, gf, ubatch, il);
ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0);
ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], n_embd * ggml_element_size(token_shift));
@@ -12214,7 +12217,7 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base {
1
);
cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, ubatch, il);
cur = build_rwkv6_time_mix(rs_inp, gf, att_norm, x_prev, ubatch, il);
ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
cb(ffn_inp, "ffn_inp", il);
@@ -12277,14 +12280,14 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base {
// ref: https://huggingface.co/recursal/QRWKV6-32B-Instruct-Preview-v0.1/blob/main/modeling_rwkv6qwen2.py
struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base {
llm_build_rwkv6qwen2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv6_base(model, params) {
GGML_ASSERT(n_embd == hparams.n_embd_k_s());
GGML_ASSERT(n_embd == hparams.n_embd_r());
ggml_tensor * cur;
ggml_tensor * inpL;
inpL = build_inp_embd(model.tok_embd);
ggml_tensor * state_copy = build_inp_s_copy();
auto * rs_inp = build_rs_inp();
const auto n_embd = hparams.n_embd;
const auto n_seq_tokens = ubatch.n_seq_tokens;
@@ -12294,9 +12297,7 @@ struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base {
const llama_layer * layer = &model.layers[il];
inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
ggml_tensor * token_shift = build_rwkv_token_shift_load(
gf, state_copy, ubatch, il
);
ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, gf, ubatch, il);
ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, il);
cb(att_norm, "attn_norm", il);
@@ -12308,7 +12309,7 @@ struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base {
1
);
cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, ubatch, il);
cur = build_rwkv6_time_mix(rs_inp, gf, att_norm, x_prev, ubatch, il);
token_shift = ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm));
ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il));
@@ -12396,14 +12397,14 @@ struct llm_build_rwkv7_base : public llm_graph_context {
}
ggml_tensor * build_rwkv7_time_mix(
llm_graph_input_rs * inp,
ggml_cgraph * gf,
ggml_tensor * cur,
ggml_tensor * x_prev,
ggml_tensor * state_copy,
ggml_tensor *& first_layer_value,
const llama_ubatch & ubatch,
int il) const {
const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
const auto n_tokens = ubatch.n_tokens;
const auto n_seqs = ubatch.n_seqs;
@@ -12482,9 +12483,9 @@ struct llm_build_rwkv7_base : public llm_graph_context {
v = ggml_reshape_3d(ctx0, v, head_size, head_count, n_tokens);
a = ggml_reshape_3d(ctx0, a, head_size, head_count, n_tokens);
ggml_tensor * wkv_state = build_recurrent_state(
gf, kv_state->get_v_l(il), state_copy,
hparams.n_embd_v_s(), n_seqs);
ggml_tensor * wkv_state = build_rs(
inp, gf, kv_state->get_s_l(il),
hparams.n_embd_s(), n_seqs);
ggml_tensor * wkv_output = ggml_rwkv_wkv7(ctx0, r, w, k, v, ggml_neg(ctx0, kk), ggml_mul(ctx0, kk, a), wkv_state);
cur = ggml_view_1d(ctx0, wkv_output, n_embd * n_tokens, 0);
@@ -12497,9 +12498,9 @@ struct llm_build_rwkv7_base : public llm_graph_context {
wkv_state,
ggml_view_1d(
ctx0,
kv_state->get_v_l(il),
hparams.n_embd_v_s() * n_seqs,
hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_state->get_v_l(il))
kv_state->get_s_l(il),
hparams.n_embd_s() * n_seqs,
hparams.n_embd_s() * kv_head * ggml_element_size(kv_state->get_s_l(il))
)
)
);
@@ -12540,7 +12541,7 @@ struct llm_build_rwkv7 : public llm_build_rwkv7_base {
inpL = build_inp_embd(model.tok_embd);
inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1);
ggml_tensor * state_copy = build_inp_s_copy();
auto * rs_inp = build_rs_inp();
const auto n_embd = hparams.n_embd;
const auto n_seq_tokens = ubatch.n_seq_tokens;
@@ -12550,9 +12551,7 @@ struct llm_build_rwkv7 : public llm_build_rwkv7_base {
const llama_layer * layer = &model.layers[il];
inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
ggml_tensor * token_shift = build_rwkv_token_shift_load(
gf, state_copy, ubatch, il
);
ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, gf, ubatch, il);
ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0);
ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], n_embd * ggml_element_size(token_shift));
@@ -12567,7 +12566,7 @@ struct llm_build_rwkv7 : public llm_build_rwkv7_base {
1
);
cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, v_first, ubatch, il);
cur = build_rwkv7_time_mix(rs_inp, gf, att_norm, x_prev, v_first, ubatch, il);
ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
cb(ffn_inp, "ffn_inp", il);
@@ -12625,7 +12624,7 @@ struct llm_build_rwkv7 : public llm_build_rwkv7_base {
struct llm_build_arwkv7 : public llm_build_rwkv7_base {
llm_build_arwkv7(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv7_base(model, params) {
GGML_ASSERT(n_embd == hparams.n_embd_k_s());
GGML_ASSERT(n_embd == hparams.n_embd_r());
ggml_tensor * cur;
ggml_tensor * inpL;
@@ -12633,7 +12632,7 @@ struct llm_build_arwkv7 : public llm_build_rwkv7_base {
inpL = build_inp_embd(model.tok_embd);
ggml_tensor * state_copy = build_inp_s_copy();
auto * rs_inp = build_rs_inp();
const auto n_embd = hparams.n_embd;
const auto n_seq_tokens = ubatch.n_seq_tokens;
@@ -12643,9 +12642,7 @@ struct llm_build_arwkv7 : public llm_build_rwkv7_base {
const llama_layer * layer = &model.layers[il];
inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
ggml_tensor * token_shift = build_rwkv_token_shift_load(
gf, state_copy, ubatch, il
);
ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, gf, ubatch, il);
ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, il);
cb(att_norm, "attn_norm", il);
@@ -12657,7 +12654,7 @@ struct llm_build_arwkv7 : public llm_build_rwkv7_base {
1
);
cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, v_first, ubatch, il);
cur = build_rwkv7_time_mix(rs_inp, gf, att_norm, x_prev, v_first, ubatch, il);
token_shift = ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm));
ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il));
@@ -13838,6 +13835,8 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params,
llama_memory_i * res;
switch (arch) {
// Models that need specific instantiation should be handled in the
// switch statement
case LLM_ARCH_BERT:
case LLM_ARCH_JINA_BERT_V2:
case LLM_ARCH_NOMIC_BERT:
@@ -13847,57 +13846,75 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params,
{
res = nullptr;
} break;
case LLM_ARCH_MAMBA:
case LLM_ARCH_RWKV6:
case LLM_ARCH_RWKV6QWEN2:
case LLM_ARCH_RWKV7:
case LLM_ARCH_ARWKV7:
{
res = new llama_kv_cache_recurrent(
*this,
GGML_TYPE_F32,
GGML_TYPE_F32,
cparams.offload_kqv,
std::max((uint32_t) 1, cparams.n_seq_max),
cparams.n_seq_max);
} break;
// Models that need standard caching should rely on recurrent/hybrid
// checks
default:
{
const auto padding = llama_kv_cache_unified::get_padding(cparams);
cparams.n_ctx = GGML_PAD(cparams.n_ctx, padding);
LLAMA_LOG_DEBUG("%s: n_ctx = %u (padded)\n", __func__, cparams.n_ctx);
if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
GGML_ASSERT(hparams.is_swa_any());
res = new llama_kv_cache_unified_iswa(
*this,
params.type_k,
params.type_v,
!cparams.flash_attn,
cparams.offload_kqv,
params.swa_full,
cparams.n_ctx,
cparams.n_seq_max,
cparams.n_ubatch,
padding);
} else {
GGML_ASSERT(!hparams.is_swa_any());
res = new llama_kv_cache_unified(
if (llm_arch_is_recurrent(arch)) {
res = new llama_memory_recurrent(
*this,
nullptr,
params.type_k,
params.type_v,
!cparams.flash_attn,
GGML_TYPE_F32,
GGML_TYPE_F32,
cparams.offload_kqv,
cparams.n_ctx,
cparams.n_seq_max,
padding,
hparams.n_swa,
hparams.swa_type);
std::max((uint32_t) 1, cparams.n_seq_max),
cparams.n_seq_max);
} else if (llm_arch_is_hybrid(arch)) {
const auto padding = llama_kv_cache_unified::get_padding(cparams);
cparams.n_ctx = GGML_PAD(cparams.n_ctx, padding);
res = new llama_memory_hybrid(
/* model */ *this,
/* attn_type_k */ params.type_k,
/* attn_type_v */ params.type_v,
/* attn_v_trans */ !cparams.flash_attn,
/* attn_kv_size */ cparams.n_ctx,
/* attn_n_pad */ padding,
/* attn_n_swa */ hparams.n_swa,
/* attn_swa_type */ hparams.swa_type,
/* recurrent_type_k */ GGML_TYPE_F32,
/* recurrent_type_v */ GGML_TYPE_F32,
/* recurrent_kv_size */ std::max((uint32_t) 1, cparams.n_seq_max),
/* n_seq_max */ cparams.n_seq_max,
/* offload */ cparams.offload_kqv);
} else {
const auto padding = llama_kv_cache_unified::get_padding(cparams);
cparams.n_ctx = GGML_PAD(cparams.n_ctx, padding);
LLAMA_LOG_DEBUG("%s: n_ctx = %u (padded)\n", __func__, cparams.n_ctx);
if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
GGML_ASSERT(hparams.is_swa_any());
res = new llama_kv_cache_unified_iswa(
*this,
params.type_k,
params.type_v,
!cparams.flash_attn,
cparams.offload_kqv,
params.swa_full,
cparams.n_ctx,
cparams.n_seq_max,
cparams.n_ubatch,
padding);
} else {
GGML_ASSERT(!hparams.is_swa_any());
res = new llama_kv_cache_unified(
*this,
nullptr,
params.type_k,
params.type_v,
!cparams.flash_attn,
cparams.offload_kqv,
cparams.n_ctx,
cparams.n_seq_max,
padding,
hparams.n_swa,
hparams.swa_type);
}
}
}
}
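Note (not part of the diff): the restructured create_memory body above reduces to a three-way dispatch. Below is a compact sketch of that ordering with simplified stand-in types (the real ones are llama_memory_i, llama_memory_recurrent, llama_memory_hybrid, llama_kv_cache_unified and its iSWA variant); constructor arguments are omitted and only the branch order mirrors the diff.

#include <memory>

// Simplified stand-ins for the memory implementations named in this hunk.
struct memory_i              { virtual ~memory_i() = default; };
struct memory_recurrent      : memory_i {};
struct memory_hybrid         : memory_i {};
struct kv_cache_unified      : memory_i {};
struct kv_cache_unified_iswa : memory_i {};

// Branch order as in the diff: fully recurrent architectures first, then hybrid
// (attention + recurrent) ones, and finally the standard unified KV cache,
// using the iSWA variant when sliding-window attention is configured.
std::unique_ptr<memory_i> create_memory(bool is_recurrent, bool is_hybrid, bool any_swa) {
    if (is_recurrent) return std::make_unique<memory_recurrent>();
    if (is_hybrid)    return std::make_unique<memory_hybrid>();
    if (any_swa)      return std::make_unique<kv_cache_unified_iswa>();
    return std::make_unique<kv_cache_unified>();
}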
@@ -14477,14 +14494,7 @@ llama_token llama_model_decoder_start_token(const llama_model * model) {
}
bool llama_model_is_recurrent(const llama_model * model) {
switch (model->arch) {
case LLM_ARCH_MAMBA: return true;
case LLM_ARCH_RWKV6: return true;
case LLM_ARCH_RWKV6QWEN2: return true;
case LLM_ARCH_RWKV7: return true;
case LLM_ARCH_ARWKV7: return true;
default: return false;
}
return llm_arch_is_recurrent(model->arch);
}
const std::vector<std::pair<std::string, ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model) {
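Note (not part of the diff): llama_model_is_recurrent now delegates to a shared llm_arch_is_recurrent helper. The sketch below is a hedged reconstruction of what that helper presumably contains, based solely on the architecture list deleted from the switch above; the actual helper lives elsewhere in the tree and may cover additional architectures.

// Stand-in enum; the real values come from the llama arch definitions.
// The list is taken verbatim from the removed switch in llama_model_is_recurrent.
enum llm_arch {
    LLM_ARCH_MAMBA,
    LLM_ARCH_RWKV6,
    LLM_ARCH_RWKV6QWEN2,
    LLM_ARCH_RWKV7,
    LLM_ARCH_ARWKV7,
    LLM_ARCH_OTHER,
};

bool llm_arch_is_recurrent(llm_arch arch) {
    switch (arch) {
        case LLM_ARCH_MAMBA:
        case LLM_ARCH_RWKV6:
        case LLM_ARCH_RWKV6QWEN2:
        case LLM_ARCH_RWKV7:
        case LLM_ARCH_ARWKV7:
            return true;
        default:
            return false;
    }
}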