Mirror of https://github.com/kvcache-ai/ktransformers.git, synced 2025-09-15 09:39:42 +00:00
Merge pull request #1090 from kvcache-ai/Atream-patch-1

Update attention.py

Commit 9037bf30d5: 1 changed file with 2 additions and 1 deletion
@@ -435,6 +435,7 @@ class KDeepseekV2Attention(BaseInjectedModule, DeepseekV2Attention):
        kv_len_arr = torch.tensor([position_ids[0, -1].item()+1], dtype=torch.int32, device=self.device)
        self.mla_wrapper.plan(qo_indptr,None,None,
                              kv_len_arr,
                              None,
                              self.num_heads,
                              self.kv_lora_rank,
                              self.qk_rope_head_dim,
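For readers tracing the change, here is a minimal standalone sketch of the `kv_len_arr` computation in the hunk above, assuming a single-sequence batch (as the `[0, -1]` indexing implies); the example `position_ids` values are made up and not part of the commit:

```python
import torch

# Hypothetical standalone example of the kv_len_arr line above.
# position_ids holds absolute token positions for one sequence, so the
# last position + 1 is the number of KV-cache entries to attend over.
position_ids = torch.arange(10).unsqueeze(0)  # (1, seq_len); illustrative input
kv_len_arr = torch.tensor([position_ids[0, -1].item() + 1], dtype=torch.int32)
print(kv_len_arr)  # tensor([10], dtype=torch.int32)
```

The resulting one-element int32 tensor is then handed to the MLA wrapper's plan() call, presumably so the scheduled attention kernel knows the KV length for the sequence.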
@@ -849,4 +850,4 @@ class flashinfer_attn(BaseInjectedModule, DeepseekV2Attention):
        attn_output = attn_output.transpose(0, 1)
        attn_output = attn_output.reshape(q_len, self.num_heads * self.v_head_dim)
        attn_output = self.o_proj(attn_output, num_tokens_tensors)
-       return attn_output
+       return attn_output
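A hedged sketch of the output epilogue in the second hunk, using a plain `nn.Linear` in place of ktransformers' `o_proj` (which additionally takes the `num_tokens_tensors` argument seen above); all shapes and sizes here are illustrative assumptions:

```python
import torch
import torch.nn as nn

# Made-up sizes for illustration only.
num_heads, q_len, v_head_dim, hidden_size = 4, 8, 16, 64
o_proj = nn.Linear(num_heads * v_head_dim, hidden_size, bias=False)

attn_output = torch.randn(num_heads, q_len, v_head_dim)            # (heads, tokens, head_dim)
attn_output = attn_output.transpose(0, 1)                          # -> (tokens, heads, head_dim)
attn_output = attn_output.reshape(q_len, num_heads * v_head_dim)   # flatten the head dimension
attn_output = o_proj(attn_output)                                  # -> (tokens, hidden_size)
print(attn_output.shape)  # torch.Size([8, 64])
```

The transpose-then-reshape pattern merges the per-head value outputs back into one vector per token before the final output projection.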