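"""Smoke test for GGUFLoader dequantization.

Loads Q4_K and Q6_K quantized tensors from a GGUF file on both CPU and GPU,
times each path, and checks that the two results agree within a tolerance.

From the ktransformers repository: https://github.com/kvcache-ai/ktransformers.git
"""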
import os
import sys
import time

# Uncomment to restrict the GPUs visible to this process:
# os.environ["CUDA_VISIBLE_DEVICES"] = "1,2"

# Make the ktransformers package importable when running this script directly.
current_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(current_path + "/../..")

from ktransformers.util.custom_gguf import GGUFLoader

import torch

import KTransformersOps  # compiled CUDA extension used by GGUFLoader's GPU dequantization path

torch.set_default_dtype(torch.bfloat16)

# Force synchronous CUDA kernel launches so any launch error is raised at the
# offending call; set before the first CUDA call so it takes effect.
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"

gguf_config = GGUFLoader("/data/Qwen2-57B-A14B-Instruct-GGUF/q4_k_m")
model_name = "/data/Qwen2-57B-A14B-Instruct"

# Q4_K: time CPU vs. GPU dequantization of an attention weight and check that
# the two paths agree.
key = "blk.1."
target = "attn_q.weight"

t1 = time.time()
q_weight_cpu = gguf_config.load_gguf_tensor(key + target, "cpu")
t2 = time.time()
q_weight_gpu = gguf_config.load_gguf_tensor(key + target, "cuda:0")
torch.cuda.synchronize()  # ensure the GPU path has finished before stopping the clock
t3 = time.time()

print()
allclose = torch.allclose(q_weight_cpu, q_weight_gpu.cpu(), atol=1e-6)
print(f"Q4k {key + target}")
print("load gguf tensor on cpu cost: ", t2 - t1)
print("load gguf tensor on gpu cost: ", t3 - t2)
print("allclose: ", allclose)


# Q6_K: the same comparison on an expert FFN weight.
key = "blk.0."
target = "ffn_down_exps.weight"

t1 = time.time()
q_weight_cpu = gguf_config.load_gguf_tensor(key + target, "cpu")
t2 = time.time()
q_weight_gpu = gguf_config.load_gguf_tensor(key + target, "cuda:0")
torch.cuda.synchronize()  # as above, finish GPU work before stopping the clock
t3 = time.time()

print()
# The GPU result is cast to float32 before comparison; the Q6_K CPU path
# appears to return float32 rather than the bfloat16 default.
allclose = torch.allclose(q_weight_cpu, q_weight_gpu.cpu().to(torch.float32), atol=1e-6)
print(f"Q6k {key + target}")
print("load gguf tensor on cpu cost: ", t2 - t1)
print("load gguf tensor on gpu cost: ", t3 - t2)
print("allclose: ", allclose)
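

# The two blocks above repeat the same load/time/compare steps. As a sketch
# of a possible refactor (the helper name `benchmark_dequant` is ours, not
# part of ktransformers), the test could be written once and reused per
# quant type:

def benchmark_dequant(loader, name, label, cast=None, atol=1e-6):
    """Load `name` on CPU and GPU via `loader`, time both paths, and report
    whether the results agree within `atol`."""
    t0 = time.time()
    cpu = loader.load_gguf_tensor(name, "cpu")
    t1 = time.time()
    gpu = loader.load_gguf_tensor(name, "cuda:0")
    torch.cuda.synchronize()
    t2 = time.time()
    gpu = gpu.cpu() if cast is None else gpu.cpu().to(cast)
    print(f"\n{label} {name}")
    print("load gguf tensor on cpu cost: ", t1 - t0)
    print("load gguf tensor on gpu cost: ", t2 - t1)
    print("allclose: ", torch.allclose(cpu, gpu, atol=atol))

# Equivalent to the inline blocks above:
# benchmark_dequant(gguf_config, "blk.1.attn_q.weight", "Q4k")
# benchmark_dequant(gguf_config, "blk.0.ffn_down_exps.weight", "Q6k", cast=torch.float32)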