Mirror of https://github.com/kvcache-ai/ktransformers.git (synced 2025-09-09 13:55:27 +00:00)
optimize gguf dequant, save mem, support Q2_K
use Marlin for lm_head; during prefill, lm_head computes logits only for the last token. This extends the context window to 19K for DeepSeek-V3/R1 within 24GB of VRAM (a sketch of the last-token trick follows the change summary below).
parent 7e1fe256c8
commit 5ec33d046d
27 changed files with 435 additions and 259 deletions
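The last-token trick behind the 19K-context claim is simple: during prefill, only the final position's logits are needed to sample the next token, so the hidden states can be sliced before the lm_head projection instead of projecting the whole prompt. A minimal sketch, assuming a generic `nn.Linear` head (the function name and shapes here are illustrative, not the repo's actual API):

```python
import torch
import torch.nn as nn

def prefill_logits(hidden_states: torch.Tensor, lm_head: nn.Linear) -> torch.Tensor:
    """Project only the last position through lm_head during prefill.

    hidden_states: [batch, seq_len, hidden] from the final decoder layer.
    Only the last token's logits are needed to pick the next token, so
    slicing before the projection avoids materializing a
    [batch, seq_len, vocab] tensor for the whole prompt.
    """
    last_hidden = hidden_states[:, -1:, :]   # [batch, 1, hidden]
    return lm_head(last_hidden)              # [batch, 1, vocab]
```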
```diff
@@ -126,6 +126,8 @@ def optimize_and_load_gguf(module: nn.Module, rule_file: str, gguf_path: str, mo
     gguf_loader=GGUFLoader(gguf_path)
     with torch.device("meta"):
         inject(module, optimize_config, model_config, gguf_loader)
+    # pre load lm_head because its big inter result
+    load_weights(module.lm_head, gguf_loader, "lm_head.")
     load_weights(module, gguf_loader)
     module.gguf_loader = gguf_loader
     del_meta(module)
```
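The hunk's comment ("pre load lm_head because its big inter result") refers to the large intermediate buffers produced when loading and dequantizing lm_head, so it is loaded first, while free VRAM is at its peak. Some back-of-the-envelope arithmetic for the last-token saving itself; the sizes here are assumptions (a 19K-token prompt, DeepSeek-V3's ~129,280-entry vocabulary, fp16 logits), not numbers from the commit:

```python
# Rough saving from computing lm_head only for the last prefill token.
seq_len, vocab, bytes_per_elem = 19_000, 129_280, 2  # assumed sizes

full_logits_gib = seq_len * vocab * bytes_per_elem / 2**30  # every position
last_logits_mib = 1 * vocab * bytes_per_elem / 2**20        # last position only

print(f"full prefill logits: {full_logits_gib:.2f} GiB")  # ~4.57 GiB
print(f"last-token logits:   {last_logits_mib:.2f} MiB")  # ~0.25 MiB
```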
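For the Q2_K support mentioned in the title: in llama.cpp's ggml format, each 84-byte Q2_K super-block packs 256 weights as 2-bit quants, 16 packed 4-bit (scale, min) pairs, and two fp16 super-scales; each 16-weight sub-block decodes as w = d*scale*q - dmin*min. A hedged NumPy sketch of the scalar dequant path, written from ggml's reference `dequantize_row_q2_K` as an illustration, not the optimized kernel this commit adds:

```python
import numpy as np

QK_K = 256  # weights per Q2_K super-block

def dequantize_q2_k_block(block: bytes) -> np.ndarray:
    """Dequantize one 84-byte Q2_K super-block into 256 float32 weights.

    ggml layout: scales[16] (low nibble = scale, high nibble = min),
    qs[64] (2-bit quants), then fp16 d (super-scale) and dmin (super-min).
    """
    scales = np.frombuffer(block, np.uint8, 16, 0)
    qs = np.frombuffer(block, np.uint8, 64, 16)
    d, dmin = np.frombuffer(block, np.float16, 2, 80).astype(np.float32)

    out = np.empty(QK_K, np.float32)
    i = 0  # index into the 16 sub-block scales
    y = 0  # write position in out
    for half in (0, 32):                  # two 128-weight halves of qs
        q = qs[half:half + 32]
        for shift in (0, 2, 4, 6):        # four 2-bit planes per byte
            for lo in (0, 16):            # two 16-weight sub-blocks per plane
                sc = scales[i]; i += 1
                dl = d * (sc & 0x0F)      # sub-block scale
                ml = dmin * (sc >> 4)     # sub-block minimum
                out[y:y + 16] = dl * ((q[lo:lo + 16] >> shift) & 3) - ml
                y += 16
    return out
```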