#!/usr/bin/env python
# coding=utf-8
"""
Description : Validate the cpuinfer_ext CPU KV cache attention kernel against flash_attn_with_kvcache.
Author : Jianwei Dong
Date : 2024-08-28 10:32:05
Version : 1.0.0
LastEditors : chenht2022
LastEditTime : 2024-08-28 10:32:05
Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
"""
import os, sys
import time

sys.path.append(os.path.dirname(__file__) + "/../build")
import cpuinfer_ext
from flash_attn import flash_attn_with_kvcache
import torch

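# Test configuration: a 10-layer toy setup with 8 KV heads, 32 query heads,
# head_dim 128, and an 8192-token KV cache split into 128-token blocks.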
layer_num = 10
kv_head_num = 8
q_head_num = 32
head_dim = 128
block_len = 128
anchor_num = 1
cache_seqlen = 8192
cache_seqlens = torch.tensor([cache_seqlen], dtype=torch.int32, device="cpu")
seqlens_zero = torch.zeros((1,), dtype=torch.int32, device="cpu")
anchor_type = cpuinfer_ext.kvcache.AnchorType.DYNAMIC
kv_type = cpuinfer_ext.kvcache.ggml_type.FP16
retrieval_type = cpuinfer_ext.kvcache.RetrievalType.LAYER
layer_step: int = 1
token_step: int = 1
layer_offset: int = 0
max_thread_num: int = 2
max_batch_size: int = 1
max_block_num: int = 512
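# CPU-side inference engine; tasks are submitted to it and run with up to
# max_thread_num threads, with sync() blocking until a submitted task finishes.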
CPUInfer = cpuinfer_ext.CPUInfer(max_thread_num)
validation_iter = 100

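# Build the KV cache configuration and run everything under inference mode
# (no autograd tracking).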
with torch.inference_mode(mode=True):
    config = cpuinfer_ext.kvcache.KVCacheConfig(
        layer_num,
        kv_head_num,
        q_head_num,
        head_dim,
        block_len,
        anchor_num,
        anchor_type,
        kv_type,
        retrieval_type,
        layer_step,
        token_step,
        layer_offset,
        max_block_num,
        max_batch_size,
        max_thread_num,
    )
    local_kvcache = cpuinfer_ext.kvcache.KVCache(config)

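    # Block table with one row for the single test sequence; here it is simply
    # the identity mapping over max_block_num cache blocks.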
    kvcaches = []
    block_table = (
        torch.arange(max_block_num, dtype=torch.int32, device="cpu")
        .contiguous()
        .view(1, -1)
    )

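    # For every layer: fill the CPU-side cache with random FP16 keys/values via
    # update_kvcache_fp16, then keep CUDA copies of the same tensors as the
    # reference inputs for flash_attn_with_kvcache.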
    for layer_idx in range(layer_num):
        k_cache = torch.randn(
            (1, cache_seqlen, kv_head_num, head_dim), dtype=torch.float16, device="cpu"
        ).contiguous()
        v_cache = torch.randn(
            (1, cache_seqlen, kv_head_num, head_dim), dtype=torch.float16, device="cpu"
        ).contiguous()

        CPUInfer.submit(
            local_kvcache.update_kvcache_fp16(
                k_cache.data_ptr(),
                v_cache.data_ptr(),
                layer_idx,
                block_table.data_ptr(),
                1,
                max_block_num,
                seqlens_zero.data_ptr(),
                cache_seqlen,
            )
        )
        CPUInfer.sync()

        kvcaches.append((k_cache.to("cuda"), v_cache.to("cuda")))

    # validation
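    # Each iteration runs the CPU attention kernel on a single query token against
    # one cached layer and compares it with flash_attn_with_kvcache on the GPU copy.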
    for i in range(validation_iter):

        k_cache = kvcaches[i % layer_num][0]
        v_cache = kvcaches[i % layer_num][1]
        input = torch.randn(
            (1, 1, q_head_num, head_dim), dtype=torch.float16, device="cpu"
        ).contiguous()
        output = torch.empty(
            (1, 1, q_head_num, head_dim), dtype=torch.float16, device="cpu"
        ).contiguous()

        # attn_lse: (bsz, q_len, q_head_num)
        attn_lse = torch.empty(
            (1, 1, q_head_num), dtype=torch.float32, device="cpu"
        ).contiguous()
        input = input / 100

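        # Submit the CPU attention task for layer i % layer_num and wait for it to
        # finish; output and attn_lse are written in place through raw data pointers.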
        CPUInfer.submit(
            local_kvcache.attn(
                input.data_ptr(),
                output.data_ptr(),
                attn_lse.data_ptr(),
                i % layer_num,
                0,
                1,
                1,
                max_block_num,
                block_table.data_ptr(),
                cache_seqlens.data_ptr(),
                -1,
                -1,
                -1,
            )
        )
        CPUInfer.sync()
        # print("cpuinfer output", output)

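        # GPU reference: flash-attn attention over the same query and KV cache.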
        t_output = flash_attn_with_kvcache(
            q=input.to("cuda"),
            k_cache=k_cache,
            v_cache=v_cache,
            cache_seqlens=cache_seqlens.to("cuda"),
        )
        # print("torch output", t_output)

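        # Mean relative error between the CPU kernel output and the flash-attn
        # reference; the test requires it to stay below 1e-3.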
        diff = torch.mean(torch.abs(output.to("cuda") - t_output)) / torch.mean(
            torch.abs(t_output)
        )
        print("diff = ", diff)
        assert diff < 0.001