Optimize GGUF dequantization, reduce memory usage, and add Q2_K support.

Use Marlin kernels for lm_head; during prefill, lm_head computes logits only for the last token.
Extend the context window to 19K tokens for DeepSeek-V3/R1 within 24GB of VRAM.
This commit is contained in:
Atream 2025-02-22 06:13:01 +00:00
parent 7e1fe256c8
commit 5ec33d046d
27 changed files with 435 additions and 259 deletions

View file

@ -1742,8 +1742,7 @@ class DeepseekV2ForCausalLM(DeepseekV2PreTrainedModel):
)
hidden_states = outputs[0]
logits = self.lm_head(hidden_states)
logits = logits[:,-1,:].unsqueeze(0).float()
logits = self.lm_head(hidden_states[:,-1:,:]).float()
loss = None
if labels is not None: