Merge branch 'upstream' into concedo_experimental

# Conflicts:
#	src/llama-context.cpp
#	tests/test-backend-ops.cpp

Commit fb13e3e51b
26 changed files with 543 additions and 447 deletions
@@ -9271,9 +9271,9 @@ struct llm_build_mamba : public llm_graph_context {
             ggml_tensor * cur,
             const llama_ubatch & ubatch,
             int il) const {
-        const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
+        const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx);
 
-        const auto kv_head = kv_state->get_head();
+        const auto kv_head = mctx_cur->get_head();
 
         const int64_t d_conv  = hparams.ssm_d_conv;
         const int64_t d_inner = hparams.ssm_d_inner;
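Every hunk in this diff applies the same mechanical change: the recurrent-layer builders stop downcasting the old mstate pointer to llama_memory_recurrent_state and instead downcast the new mctx pointer to llama_memory_recurrent_context, while the surrounding logic (kv_head, get_r_l, get_s_l) is unchanged. A minimal, self-contained C++ sketch of that downcast pattern, using simplified stand-in types rather than the real llama.cpp classes:

// Sketch only: stand-in types to illustrate the cast, NOT the real llama.cpp interfaces.
#include <cstdint>
#include <cstdio>

struct memory_context_base {                              // stands in for the generic memory context held as mctx
    virtual ~memory_context_base() = default;
};

struct memory_recurrent_context : memory_context_base {   // stands in for llama_memory_recurrent_context
    uint32_t head = 0;                                     // first cache slot assigned to the current ubatch
    uint32_t get_head() const { return head; }
};

int main() {
    memory_recurrent_context rctx;
    rctx.head = 3;

    // The graph builder only holds a base pointer, so each recurrent layer
    // recovers the concrete type with a static_cast, as in the hunks here.
    const memory_context_base * mctx = &rctx;
    const auto * mctx_cur = static_cast<const memory_recurrent_context *>(mctx);

    std::printf("kv_head = %u\n", mctx_cur->get_head());
    return 0;
}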
@@ -9291,8 +9291,8 @@ struct llm_build_mamba : public llm_graph_context {
         GGML_ASSERT(ubatch.equal_seqs);
         GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs);
 
-        ggml_tensor * conv_states_all = kv_state->get_r_l(il);
-        ggml_tensor * ssm_states_all  = kv_state->get_s_l(il);
+        ggml_tensor * conv_states_all = mctx_cur->get_r_l(il);
+        ggml_tensor * ssm_states_all  = mctx_cur->get_s_l(il);
 
         // (ab)using the KV cache to store the states
         ggml_tensor * conv = build_rs(
@@ -12016,7 +12016,7 @@ struct llm_build_rwkv6_base : public llm_graph_context {
             ggml_tensor * x_prev,
             const llama_ubatch & ubatch,
             int il) const {
-        const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
+        const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx);
 
         const auto n_tokens = ubatch.n_tokens;
         const auto n_seqs = ubatch.n_seqs;
@@ -12026,7 +12026,7 @@ struct llm_build_rwkv6_base : public llm_graph_context {
         const auto n_head = n_embd / head_size;
         const auto n_head_kv = hparams.n_head_kv(il);
 
-        const auto kv_head = kv_state->get_head();
+        const auto kv_head = mctx_cur->get_head();
 
         const auto & layer = model.layers[il];
 
@@ -12138,7 +12138,7 @@ struct llm_build_rwkv6_base : public llm_graph_context {
         }
 
         ggml_tensor * wkv_state = build_rs(
-                inp, gf, kv_state->get_s_l(il),
+                inp, gf, mctx_cur->get_s_l(il),
                 hparams.n_embd_s(), n_seqs);
 
         ggml_tensor * wkv_output;
@@ -12157,9 +12157,9 @@ struct llm_build_rwkv6_base : public llm_graph_context {
                 wkv_state,
                 ggml_view_1d(
                     ctx0,
-                    kv_state->get_s_l(il),
+                    mctx_cur->get_s_l(il),
                     hparams.n_embd_s() * n_seqs,
-                    hparams.n_embd_s() * kv_head * ggml_element_size(kv_state->get_s_l(il))
+                    hparams.n_embd_s() * kv_head * ggml_element_size(mctx_cur->get_s_l(il))
                 )
             )
         );
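The two ggml_view_1d hunks (here and in llm_build_rwkv7_base below) only rename the tensor source; the view arguments stay the same: the element count covers the states of this ubatch's sequences, and the final argument is a byte offset that skips the states stored before slot kv_head. A small sketch of that arithmetic with made-up sizes (n_embd_s, n_seqs, kv_head and the element size are assumptions, not values from the commit):

// Sketch only: shows how the 1-D view arguments above are derived.
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t n_embd_s  = 4096;  // assumed per-sequence state width, i.e. hparams.n_embd_s()
    const int64_t n_seqs    = 2;     // assumed number of sequences in the current ubatch
    const int64_t kv_head   = 3;     // assumed first cache slot of the ubatch
    const int64_t elem_size = 4;     // assumed element size in bytes (F32), i.e. ggml_element_size(...)

    const int64_t view_elems   = n_embd_s * n_seqs;              // elements covered by the view
    const int64_t offset_bytes = n_embd_s * kv_head * elem_size; // byte offset into the per-layer state tensor

    std::printf("view covers %lld elements starting at byte %lld\n",
                (long long) view_elems, (long long) offset_bytes);
    return 0;
}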
@@ -12413,7 +12413,7 @@ struct llm_build_rwkv7_base : public llm_graph_context {
             ggml_tensor *& first_layer_value,
             const llama_ubatch & ubatch,
             int il) const {
-        const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
+        const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx);
 
         const auto n_tokens = ubatch.n_tokens;
         const auto n_seqs = ubatch.n_seqs;
@@ -12422,7 +12422,7 @@ struct llm_build_rwkv7_base : public llm_graph_context {
         const auto head_count = n_embd / head_size;
         const auto n_seq_tokens = ubatch.n_seq_tokens;
 
-        const auto kv_head = kv_state->get_head();
+        const auto kv_head = mctx_cur->get_head();
 
         const auto & layer = model.layers[il];
 
@@ -12493,7 +12493,7 @@ struct llm_build_rwkv7_base : public llm_graph_context {
         a = ggml_reshape_3d(ctx0, a, head_size, head_count, n_tokens);
 
         ggml_tensor * wkv_state = build_rs(
-                inp, gf, kv_state->get_s_l(il),
+                inp, gf, mctx_cur->get_s_l(il),
                 hparams.n_embd_s(), n_seqs);
 
         ggml_tensor * wkv_output = ggml_rwkv_wkv7(ctx0, r, w, k, v, ggml_neg(ctx0, kk), ggml_mul(ctx0, kk, a), wkv_state);
@@ -12507,9 +12507,9 @@ struct llm_build_rwkv7_base : public llm_graph_context {
                 wkv_state,
                 ggml_view_1d(
                     ctx0,
-                    kv_state->get_s_l(il),
+                    mctx_cur->get_s_l(il),
                     hparams.n_embd_s() * n_seqs,
-                    hparams.n_embd_s() * kv_head * ggml_element_size(kv_state->get_s_l(il))
+                    hparams.n_embd_s() * kv_head * ggml_element_size(mctx_cur->get_s_l(il))
                 )
             )
         );