optimize GGUF dequantization, reduce memory usage, support Q2_K

use Marlin kernels for lm_head; during prefill, lm_head only computes logits for the last token
extend the context window to 19K for DeepSeek-V3/R1 within 24GB VRAM
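For reference, a minimal NumPy sketch of Q2_K dequantization (not code from this commit): Q2_K packs 256 weights per super-block as 2-bit quants in 16 sub-blocks of 16 values, each sub-block carrying a 4-bit scale and 4-bit min that are themselves scaled by the super-block's fp16 d/dmin, so a weight decodes as d*scale*q - dmin*min. The byte layout below follows my reading of the ggml reference implementation; the function name and the assumption that d/dmin are already converted to float are mine.

import numpy as np

QK_K = 256  # weights per Q2_K super-block

def dequant_q2_k_block(d: float, dmin: float, scales: np.ndarray, qs: np.ndarray) -> np.ndarray:
    """Dequantize one Q2_K super-block.

    d, dmin : super-block scales (fp16 in the file, passed here as floats)
    scales  : 16 bytes; low nibble = sub-block scale, high nibble = sub-block min
    qs      : 64 bytes holding 256 2-bit quants
    """
    y = np.empty(QK_K, dtype=np.float32)
    sub = 0   # sub-block index into `scales`
    out = 0   # write position in `y`
    q_off = 0
    for _half in range(2):                 # two 128-value halves per super-block
        for shift in (0, 2, 4, 6):         # four 2-bit planes per byte
            for base in (0, 16):           # two 16-value sub-blocks per plane
                sc = scales[sub]; sub += 1
                dl = d * (sc & 0xF)        # effective scale for this sub-block
                ml = dmin * (sc >> 4)      # effective min for this sub-block
                q = (qs[q_off + base:q_off + base + 16] >> shift) & 3
                y[out:out + 16] = dl * q.astype(np.float32) - ml
                out += 16
        q_off += 32
    return y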
Author: Atream
Date: 2025-02-22 06:13:01 +00:00
Parent: 7e1fe256c8
Commit: 5ec33d046d
27 changed files with 435 additions and 259 deletions


@@ -1699,7 +1699,7 @@ class DeepseekV3ForCausalLM(DeepseekV3PreTrainedModel):
         )
         hidden_states = outputs[0]
-        logits = self.lm_head(hidden_states.to(self.lm_head.weight.device))
+        logits = self.lm_head(hidden_states[:,-1:,:])
         logits = logits.float()
         loss = None
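The slice in the diff is the "lm_head only computes the last token for prefill" part of this commit: during prefill, only the final position's logits feed next-token sampling, so projecting just that position skips a seq_len x vocab_size matmul and the matching logits buffer. A small PyTorch sketch with made-up toy sizes (the real DeepSeek-V3 shapes are much larger):

import torch

batch, seq_len, hidden, vocab = 1, 4096, 1024, 32000
hidden_states = torch.randn(batch, seq_len, hidden)
lm_head = torch.nn.Linear(hidden, vocab, bias=False)

# Old path: project every prefill position -> (batch, seq_len, vocab) logits.
logits_full = lm_head(hidden_states)

# New path: only the last position is needed to sample the next token,
# so the projection shrinks to (batch, 1, vocab).
logits_last = lm_head(hidden_states[:, -1:, :])

assert torch.allclose(logits_full[:, -1:, :], logits_last, atol=1e-4)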