Mirror of https://github.com/kvcache-ai/ktransformers.git (synced 2026-05-02 21:51:30 +00:00)
Refactor: restructure repository to focus on kt-kernel and KT-SFT modules (#1581)
* refactor: move legacy code to archive/ directory
  - Moved ktransformers, csrc, third_party, merge_tensors to archive/
  - Moved build scripts and configurations to archive/
  - Kept kt-kernel, KT-SFT, doc, and README files in root
  - Preserved complete git history for all moved files
* refactor: restructure repository to focus on kt-kernel and KT-SFT modules
* fix README
* fix README
* fix README
* fix README
* docs: add performance benchmarks to kt-kernel section
  Add comprehensive performance data for kt-kernel to match KT-SFT's presentation:
  - AMX kernel optimization: 21.3 TFLOPS (3.9× faster than PyTorch)
  - Prefill phase: up to 20× speedup vs baseline
  - Decode phase: up to 4× speedup
  - NUMA optimization: up to 63% throughput improvement
  - Multi-GPU (8×L20): 227.85 tokens/s total throughput with DeepSeek-R1 FP8
  Source: https://lmsys.org/blog/2025-10-22-KTransformers/
  This provides users with concrete performance metrics for both core modules, making it easier to understand the capabilities of each component.
* refactor: improve kt-kernel performance data with specific hardware and models
  Replace generic performance descriptions with concrete benchmarks:
  - Specify exact hardware: 8×L20 GPU + Xeon Gold 6454S, Single/Dual-socket Xeon + AMX
  - Include specific models: DeepSeek-R1-0528 (FP8), DeepSeek-V3 (671B)
  - Show detailed metrics: total throughput, output throughput, concurrency details
  - Match KT-SFT presentation style for consistency
  This provides users with actionable performance data they can use to evaluate hardware requirements and expected performance for their use cases.
* fix README
* docs: clean up performance table and improve formatting
* add pic for README
* refactor: simplify .gitmodules and backup legacy submodules (a sketch of the trimmed file follows below)
  - Remove 7 legacy submodules from root .gitmodules (archive/third_party/*)
  - Keep only 2 active submodules for kt-kernel (llama.cpp, pybind11)
  - Backup complete .gitmodules to archive/.gitmodules
  - Add documentation in archive/README.md for researchers who need legacy submodules
  This reduces initial clone size by ~500MB and avoids downloading unused dependencies.
* refactor: move doc/ back to root directory
  Keep documentation in root for easier access and maintenance.
* refactor: consolidate all images to doc/assets/
  - Move kt-kernel/assets/heterogeneous_computing.png to doc/assets/
  - Remove KT-SFT/assets/ (images already in doc/assets/)
  - Update KT-SFT/README.md image references to ../doc/assets/
  - Eliminates ~7.9MB image duplication
  - Centralizes all documentation assets in one location
* fix pic path for README
Parent: 8729435d85
Commit: 57d14d22bc
510 changed files with 711 additions and 334 deletions
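To make the .gitmodules simplification described in the commit message concrete, here is a minimal sketch of what the trimmed root .gitmodules could look like after this change. The commit only states that llama.cpp and pybind11 remain as active submodules for kt-kernel; the submodule paths and upstream URLs below are illustrative assumptions, not copied from the repository.

# .gitmodules (root) -- illustrative sketch; paths and URLs are assumptions
[submodule "kt-kernel/third_party/llama.cpp"]
	path = kt-kernel/third_party/llama.cpp
	url = https://github.com/ggml-org/llama.cpp.git
[submodule "kt-kernel/third_party/pybind11"]
	path = kt-kernel/third_party/pybind11
	url = https://github.com/pybind/pybind11.git

The seven legacy submodules under archive/third_party/ are intentionally absent here; per the commit message, their original entries are preserved in archive/.gitmodules for anyone who still needs them.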
archive/csrc/ktransformers_ext/examples/test_attention.py (new file, 142 lines)
@@ -0,0 +1,142 @@
#!/usr/bin/env python
# coding=utf-8
"""
Description  :
Author       : Jianwei Dong
Date         : 2024-08-28 10:32:05
Version      : 1.0.0
LastEditors  : chenht2022
LastEditTime : 2024-08-28 10:32:05
Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
"""
import os, sys
import time

sys.path.append(os.path.dirname(__file__) + "/../build")
import cpuinfer_ext
from flash_attn import flash_attn_with_kvcache
import torch

layer_num = 10
kv_head_num = 8
q_head_num = 32
head_dim = 128
block_len = 128
anchor_num = 1
cache_seqlen = 8192
cache_seqlens = torch.tensor([cache_seqlen], dtype=torch.int32, device="cpu")
seqlens_zero = torch.zeros((1,), dtype=torch.int32, device="cpu")
anchor_type = cpuinfer_ext.kvcache.AnchorType.DYNAMIC
kv_type = cpuinfer_ext.kvcache.ggml_type.FP16
retrieval_type = cpuinfer_ext.kvcache.RetrievalType.LAYER
layer_step: int = 1
token_step: int = 1
layer_offset: int = 0
max_thread_num: int = 2
max_batch_size: int = 1
max_block_num: int = 512
CPUInfer = cpuinfer_ext.CPUInfer(max_thread_num)
validation_iter = 100

with torch.inference_mode(mode=True):
    config = cpuinfer_ext.kvcache.KVCacheConfig(
        layer_num,
        kv_head_num,
        q_head_num,
        head_dim,
        block_len,
        anchor_num,
        anchor_type,
        kv_type,
        retrieval_type,
        layer_step,
        token_step,
        layer_offset,
        max_block_num,
        max_batch_size,
        max_thread_num,
    )
    local_kvcache = cpuinfer_ext.kvcache.KVCache(config)

    kvcaches = []
    block_table = (
        torch.arange(max_block_num, dtype=torch.int32, device="cpu")
        .contiguous()
        .view(1, -1)
    )

    for layer_idx in range(layer_num):
        k_cache = torch.randn(
            (1, cache_seqlen, kv_head_num, head_dim), dtype=torch.float16, device="cpu"
        ).contiguous()
        v_cache = torch.randn(
            (1, cache_seqlen, kv_head_num, head_dim), dtype=torch.float16, device="cpu"
        ).contiguous()

        CPUInfer.submit(
            local_kvcache.update_kvcache_fp16(
                k_cache.data_ptr(),
                v_cache.data_ptr(),
                layer_idx,
                block_table.data_ptr(),
                1,
                max_block_num,
                seqlens_zero.data_ptr(),
                cache_seqlen,
            )
        )
        CPUInfer.sync()

        kvcaches.append((k_cache.to("cuda"), v_cache.to("cuda")))

    # validation
    for i in range(validation_iter):

        k_cache = kvcaches[i % layer_num][0]
        v_cache = kvcaches[i % layer_num][1]
        input = torch.randn(
            (1, 1, q_head_num, head_dim), dtype=torch.float16, device="cpu"
        ).contiguous()
        output = torch.empty(
            (1, 1, q_head_num, head_dim), dtype=torch.float16, device="cpu"
        ).contiguous()

        # attn_lse: (bsz, q_len, q_head_num)
        attn_lse = torch.empty(
            (1, 1, q_head_num), dtype=torch.float32, device="cpu"
        ).contiguous()
        input = input / 100

        CPUInfer.submit(
            local_kvcache.attn(
                input.data_ptr(),
                output.data_ptr(),
                attn_lse.data_ptr(),
                i % layer_num,
                0,
                1,
                1,
                max_block_num,
                block_table.data_ptr(),
                cache_seqlens.data_ptr(),
                -1,
                -1,
                -1,
            )
        )
        CPUInfer.sync()
        # print("cpuinfer output", output)

        t_output = flash_attn_with_kvcache(
            q=input.to("cuda"),
            k_cache=k_cache,
            v_cache=v_cache,
            cache_seqlens=cache_seqlens.to("cuda"),
        )
        # print("torch output", t_output)

        diff = torch.mean(torch.abs(output.to("cuda") - t_output)) / torch.mean(
            torch.abs(t_output)
        )
        print("diff = ", diff)
        assert diff < 0.001
archive/csrc/ktransformers_ext/examples/test_linear.py (new file, 62 lines)
@@ -0,0 +1,62 @@
#!/usr/bin/env python
# coding=utf-8
'''
Description  :
Author       : chenht2022
Date         : 2024-07-25 10:32:05
Version      : 1.0.0
LastEditors  : chenht2022
LastEditTime : 2024-08-06 10:36:59
Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
'''
import os, sys
import time
sys.path.append(os.path.dirname(__file__) + '/../build')
import cpuinfer_ext
import torch

input_size = 16384
output_size = 5120
stride = 32
group_max_len = 1024
proj_type = 1 # ggml_type::GGML_TYPE_F16
hidden_type = 1 # ggml_type::GGML_TYPE_F16
qlen = 30
layer_num = 10
CPUInfer = cpuinfer_ext.CPUInfer(48)
validation_iter = 100

with torch.inference_mode(mode=True):
    linears = []
    projs = []
    for _ in range(layer_num):
        proj = torch.randn((output_size, input_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous()
        config = cpuinfer_ext.linear.LinearConfig(input_size, output_size, stride, group_max_len, proj.data_ptr(), proj_type, hidden_type)
        linear = cpuinfer_ext.linear.Linear(config)
        projs.append(proj)
        linears.append(linear)

    # validation
    for i in range(validation_iter):
        linear = linears[i % layer_num]
        input = torch.randn((qlen, input_size), dtype=torch.float16).contiguous()
        output = torch.empty((qlen, output_size), dtype=torch.float16).contiguous()
        input = input / 100

        CPUInfer.submit(
            linear.forward(
                qlen,
                input.data_ptr(),
                output.data_ptr()
            )
        )
        CPUInfer.sync()
        # print('cpuinfer output', output)

        proj = projs[i%layer_num]
        t_output = torch.mm(input, proj.t())
        # print('torch output', t_output)

        diff = torch.mean(torch.abs(output - t_output)) / torch.mean(torch.abs(t_output))
        print('diff = ', diff)
        assert(diff < 0.001)
archive/csrc/ktransformers_ext/examples/test_mlp.py (new file, 82 lines)
@@ -0,0 +1,82 @@
#!/usr/bin/env python
# coding=utf-8
'''
Description  :
Author       : chenht2022
Date         : 2024-07-25 10:32:05
Version      : 1.0.0
LastEditors  : chenht2022
LastEditTime : 2024-08-06 10:37:28
Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
'''
import os, sys
import time
sys.path.append(os.path.dirname(__file__) + '/../build')
import cpuinfer_ext
import torch

hidden_size = 5120
intermediate_size = 3072
stride = 32
group_max_len = 1024
gate_type = 1 # ggml_type::GGML_TYPE_F16
up_type = 1 # ggml_type::GGML_TYPE_F16
down_type = 1 # ggml_type::GGML_TYPE_F16
hidden_type = 1 # ggml_type::GGML_TYPE_F16
qlen = 30
layer_num = 10
CPUInfer = cpuinfer_ext.CPUInfer(48)
validation_iter = 100

def act_fn(x):
    return x / (1.0 + torch.exp(-x))

def mlp_torch(input, gate_proj, up_proj, down_proj):
    gate_buf = torch.mm(input, gate_proj.t())
    up_buf = torch.mm(input, up_proj.t())
    intermediate = act_fn(gate_buf) * up_buf
    ret = torch.mm(intermediate, down_proj.t())
    return ret

with torch.inference_mode(mode=True):
    mlps = []
    gate_projs = []
    up_projs = []
    down_projs = []
    for _ in range(layer_num):
        gate_proj = torch.randn((intermediate_size, hidden_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous()
        up_proj = torch.randn((intermediate_size, hidden_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous()
        down_proj = torch.randn((hidden_size, intermediate_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous()
        config = cpuinfer_ext.mlp.MLPConfig(hidden_size, intermediate_size, stride, group_max_len, gate_proj.data_ptr(), up_proj.data_ptr(), down_proj.data_ptr(), gate_type, up_type, down_type, hidden_type)
        mlp = cpuinfer_ext.mlp.MLP(config)
        gate_projs.append(gate_proj)
        up_projs.append(up_proj)
        down_projs.append(down_proj)
        mlps.append(mlp)

    # validation
    for i in range(validation_iter):
        mlp = mlps[i % layer_num]
        input = torch.randn((qlen, hidden_size), dtype=torch.float16).contiguous()
        output = torch.empty((qlen, hidden_size), dtype=torch.float16).contiguous()
        input = input / 100

        CPUInfer.submit(
            mlp.forward(
                qlen,
                input.data_ptr(),
                output.data_ptr()
            )
        )
        CPUInfer.sync()
        # print('cpuinfer output', output)

        gate_proj = gate_projs[i%layer_num]
        up_proj = up_projs[i%layer_num]
        down_proj = down_projs[i%layer_num]
        t_output = mlp_torch(input, gate_proj, up_proj, down_proj)
        # print('torch output', t_output)

        diff = torch.mean(torch.abs(output - t_output)) / torch.mean(torch.abs(t_output))
        print('diff = ', diff)
        assert(diff < 0.001)
archive/csrc/ktransformers_ext/examples/test_moe.py (new file, 121 lines)
@@ -0,0 +1,121 @@
#!/usr/bin/env python
# coding=utf-8
'''
Description  :
Author       : chenht2022
Date         : 2024-07-25 10:32:05
Version      : 1.0.0
LastEditors  : chenht2022
LastEditTime : 2024-08-06 10:38:05
Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
'''
import os, sys
import time
sys.path.append(os.path.dirname(__file__) + '/../build')
import cpuinfer_ext
import torch

expert_num = 160
hidden_size = 5120
intermediate_size = 1536
stride = 32
group_min_len = 10
group_max_len = 1024
gate_type = 1 # ggml_type::GGML_TYPE_F16
up_type = 1 # ggml_type::GGML_TYPE_F16
down_type = 1 # ggml_type::GGML_TYPE_F16
hidden_type = 1 # ggml_type::GGML_TYPE_F16
n_routed_experts = 6
qlen = 30
layer_num = 10
CPUInfer = cpuinfer_ext.CPUInfer(48)
validation_iter = 100

def act_fn(x):
    return x / (1.0 + torch.exp(-x))

def mlp_torch(input, gate_proj, up_proj, down_proj):
    gate_buf = torch.mm(input, gate_proj.t())
    up_buf = torch.mm(input, up_proj.t())
    intermediate = act_fn(gate_buf) * up_buf
    ret = torch.mm(intermediate, down_proj.t())
    return ret

def moe_torch(input, expert_ids, weights, gate_proj, up_proj, down_proj):
    cnts = expert_ids.new_zeros((expert_ids.shape[0], expert_num))
    cnts.scatter_(1, expert_ids, 1)
    tokens_per_expert = cnts.sum(dim=0)
    idxs = expert_ids.view(-1).argsort()
    sorted_tokens = input[idxs // expert_ids.shape[1]]

    outputs = []
    start_idx = 0
    for i, num_tokens in enumerate(tokens_per_expert):
        end_idx = start_idx + num_tokens
        if num_tokens == 0:
            continue
        tokens_for_this_expert = sorted_tokens[start_idx:end_idx]
        expert_out = mlp_torch(tokens_for_this_expert, gate_proj[i], up_proj[i], down_proj[i])
        outputs.append(expert_out)
        start_idx = end_idx

    outs = torch.cat(outputs, dim=0) if len(outputs) else sorted_tokens.new_empty(0)

    new_x = torch.empty_like(outs)
    new_x[idxs] = outs
    t_output = (
        new_x.view(*expert_ids.shape, -1)
        .type(weights.dtype)
        .mul_(weights.unsqueeze(dim=-1))
        .sum(dim=1)
        .type(new_x.dtype)
    )
    return t_output

with torch.inference_mode(mode=True):
    moes = []
    gate_projs = []
    up_projs = []
    down_projs = []
    for _ in range(layer_num):
        gate_proj = torch.randn((expert_num, intermediate_size, hidden_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous()
        up_proj = torch.randn((expert_num, intermediate_size, hidden_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous()
        down_proj = torch.randn((expert_num, hidden_size, intermediate_size), dtype=torch.float16, device = "cuda").to("cpu").contiguous()
        config = cpuinfer_ext.moe.MOEConfig(expert_num, n_routed_experts, hidden_size, intermediate_size, stride, group_min_len, group_max_len, gate_proj.data_ptr(), up_proj.data_ptr(), down_proj.data_ptr(), gate_type, up_type, down_type, hidden_type)
        moe = cpuinfer_ext.moe.MOE(config)
        gate_projs.append(gate_proj)
        up_projs.append(up_proj)
        down_projs.append(down_proj)
        moes.append(moe)

    # validation
    for i in range(validation_iter):
        expert_ids = torch.stack([torch.randperm(expert_num)[:n_routed_experts] for _ in range(qlen)]).contiguous()
        weights = torch.rand((qlen, n_routed_experts), dtype=torch.float32).contiguous()
        input = torch.randn((qlen, hidden_size), dtype=torch.float16).contiguous()
        output = torch.empty((qlen, hidden_size), dtype=torch.float16).contiguous()
        input = input / 100

        moe = moes[i % layer_num]
        CPUInfer.submit(
            moe.forward(
                qlen,
                n_routed_experts,
                expert_ids.data_ptr(),
                weights.data_ptr(),
                input.data_ptr(),
                output.data_ptr()
            )
        )
        CPUInfer.sync()
        # print('cpuinfer output', output)

        gate_proj = gate_projs[i%layer_num]
        up_proj = up_projs[i%layer_num]
        down_proj = down_projs[i%layer_num]
        t_output = moe_torch(input, expert_ids, weights, gate_proj, up_proj, down_proj)
        # print('torch output', t_output)

        diff = torch.mean(torch.abs(output - t_output)) / torch.mean(torch.abs(t_output))
        print('diff = ', diff)
        assert(diff < 0.001)