Mirror of https://github.com/kvcache-ai/ktransformers.git (synced 2025-09-16 10:09:42 +00:00)
optimize GGUF dequantization, save memory, support Q2_K
use Marlin for lm_head; during prefill, lm_head computes logits only for the last token; this extends the context window to 19K for DeepSeek-V3/R1 within 24GB of VRAM
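A minimal sketch of the last-token-only idea, assuming a hidden_states tensor of shape [batch, seq_len, hidden] (names here are illustrative, not the repo's actual code): during prefill only the final position's logits are needed to pick the next token, so the hidden states can be sliced before the large vocabulary projection.

    import torch

    def lm_head_last_token(hidden_states: torch.Tensor, lm_head: torch.nn.Linear) -> torch.Tensor:
        # Prefill only needs logits for the final position, so slice
        # [B, S, H] -> [B, 1, H] before the [H, V] vocab projection
        # instead of projecting every position.
        return lm_head(hidden_states[:, -1:, :])

Skipping the projection for all but one position avoids materializing a [batch, seq_len, vocab] logits tensor during prefill, which is where much of the memory saving that helps enable the longer context comes from.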
This commit is contained in:
parent 7e1fe256c8, commit 5ec33d046d
27 changed files with 435 additions and 259 deletions
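For reference, a rough sketch of Q2_K dequantization following ggml's public block_q2_K layout (16 packed 4-bit scale/min pairs, 64 bytes of 2-bit quants, then fp16 super-block scales d and dmin: 84 bytes per 256 weights). This illustrates the format only; it is not the kernel added in this commit.

    import numpy as np

    QK_K = 256  # weights per Q2_K super-block (84 bytes each)

    def dequant_q2_k_block(block: bytes) -> np.ndarray:
        # Layout per ggml's block_q2_K; illustrative CPU reference only.
        scales = np.frombuffer(block, dtype=np.uint8, count=16, offset=0)
        qs = np.frombuffer(block, dtype=np.uint8, count=64, offset=16)
        d, dmin = np.frombuffer(block, dtype=np.float16, count=2, offset=80).astype(np.float32)
        out = np.empty(QK_K, dtype=np.float32)
        for sub in range(16):          # 16 sub-blocks of 16 weights
            grp, rem = divmod(sub, 8)  # the two 128-weight halves each use 32 qs bytes
            j, half = divmod(rem, 2)   # bit position (2*j) and 16-byte half within the group
            sc = int(scales[sub])
            dl = d * (sc & 0xF)        # sub-block scale
            ml = dmin * (sc >> 4)      # sub-block min
            base = grp * 32 + half * 16
            q = (qs[base : base + 16] >> (2 * j)) & 3
            out[sub * 16 : (sub + 1) * 16] = dl * q - ml
        return out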
ktransformers/ktransformers_ext/cuda/test_dequant.py (new file, 16 additions)
@@ -0,0 +1,16 @@
+import os
+import sys
+sys.path.insert(0, "/home/zbx/ktransformers")
+from ktransformers.util.custom_gguf import GGUFLoader
+import torch
+
+gguf_loader_1 = GGUFLoader("/mnt/data/model/DeepseekV3-q4km-gguf")
+gguf_loader_2 = GGUFLoader("/mnt/data/chenht/model/gguf_for_ktransformers/DeepSeek-V3-bf16/")
+
+torch.set_default_dtype(torch.bfloat16)
+
+tensor_1 = gguf_loader_1.load_gguf_tensor("blk.0.attn_kv_a_mqa.weight", "cuda")
+tensor_2 = gguf_loader_2.load_gguf_tensor("blk.0.attn_kv_a_mqa.weight", "cuda")
+
+print(tensor_1[0, -64:])
+print(tensor_2[0, -64:])
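The two printed slices are compared by eye; a possible follow-up, reusing tensor_1 and tensor_2 from the script above, would quantify the dequantization error against the bf16 reference:

    # Hypothetical extension of the test above: measure the error
    # numerically instead of eyeballing two printed 64-element slices.
    diff = (tensor_1.float() - tensor_2.float()).abs()
    print("max abs err:", diff.max().item(), "mean abs err:", diff.mean().item())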