#!/usr/bin/env python
# coding=utf-8
'''
Description  : Correctness test for the cpuinfer_ext MoE operator against a pure-PyTorch reference implementation.
Author       : chenht2022
Date         : 2024-07-25 10:32:05
Version      : 1.0.0
LastEditors  : chenht2022
LastEditTime : 2024-08-06 10:38:05
Copyright (c) 2024 by KVCache.AI, All Rights Reserved.
'''
import os, sys
import time
# Make the compiled cpuinfer_ext extension (built under ../build) importable.
sys.path.append(os.path.dirname(__file__) + '/../build')
import cpuinfer_ext
import torch

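# Test configuration: problem sizes, ggml weight types, and the CPUInfer worker count.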
expert_num = 160
hidden_size = 5120
intermediate_size = 1536
stride = 32
group_min_len = 10
group_max_len = 1024
gate_type = 1    # ggml_type::GGML_TYPE_F16
up_type = 1      # ggml_type::GGML_TYPE_F16
down_type = 1    # ggml_type::GGML_TYPE_F16
hidden_type = 1  # ggml_type::GGML_TYPE_F16
n_routed_experts = 6
qlen = 30
layer_num = 10
CPUInfer = cpuinfer_ext.CPUInfer(48)
validation_iter = 100

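# SiLU activation written out explicitly: x * sigmoid(x) = x / (1 + exp(-x)).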
def act_fn(x):
    return x / (1.0 + torch.exp(-x))

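# Reference single-expert MLP: down_proj(act_fn(x @ gate_proj.T) * (x @ up_proj.T)).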
def mlp_torch(input, gate_proj, up_proj, down_proj):
    gate_buf = torch.mm(input, gate_proj.t())
    up_buf = torch.mm(input, up_proj.t())
    intermediate = act_fn(gate_buf) * up_buf
    ret = torch.mm(intermediate, down_proj.t())
    return ret

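# Pure-PyTorch reference MoE forward: sort tokens by routed expert, run each
# group through its expert MLP, then scatter the results back and combine them
# with the routing weights.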
def moe_torch(input, expert_ids, weights, gate_proj, up_proj, down_proj):
    # count how many tokens are routed to each expert
    cnts = expert_ids.new_zeros((expert_ids.shape[0], expert_num))
    cnts.scatter_(1, expert_ids, 1)
    tokens_per_expert = cnts.sum(dim=0)
    # sort the (token, expert) pairs by expert id so each expert's tokens are contiguous
    idxs = expert_ids.view(-1).argsort()
    sorted_tokens = input[idxs // expert_ids.shape[1]]

    outputs = []
    start_idx = 0
    for i, num_tokens in enumerate(tokens_per_expert):
        end_idx = start_idx + num_tokens
        if num_tokens == 0:
            continue
        tokens_for_this_expert = sorted_tokens[start_idx:end_idx]
        expert_out = mlp_torch(tokens_for_this_expert, gate_proj[i], up_proj[i], down_proj[i])
        outputs.append(expert_out)
        start_idx = end_idx

    outs = torch.cat(outputs, dim=0) if len(outputs) else sorted_tokens.new_empty(0)

    # undo the sort, then apply the routing weights and sum over the selected experts
    new_x = torch.empty_like(outs)
    new_x[idxs] = outs
    t_output = (
        new_x.view(*expert_ids.shape, -1)
        .type(weights.dtype)
        .mul_(weights.unsqueeze(dim=-1))
        .sum(dim=1)
        .type(new_x.dtype)
    )
    return t_output

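# Build layer_num independent MoE layers with random FP16 weights. The weights
# are generated on the GPU, copied to contiguous CPU tensors, and handed to the
# CPU operator through raw data_ptr() addresses in MOEConfig.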
with torch.inference_mode(mode=True):
    moes = []
    gate_projs = []
    up_projs = []
    down_projs = []
    for _ in range(layer_num):
        gate_proj = torch.randn((expert_num, intermediate_size, hidden_size), dtype=torch.float16, device="cuda").to("cpu").contiguous()
        up_proj = torch.randn((expert_num, intermediate_size, hidden_size), dtype=torch.float16, device="cuda").to("cpu").contiguous()
        down_proj = torch.randn((expert_num, hidden_size, intermediate_size), dtype=torch.float16, device="cuda").to("cpu").contiguous()
        config = cpuinfer_ext.moe.MOEConfig(expert_num, n_routed_experts, hidden_size, intermediate_size, stride, group_min_len, group_max_len, gate_proj.data_ptr(), up_proj.data_ptr(), down_proj.data_ptr(), gate_type, up_type, down_type, hidden_type)
        moe = cpuinfer_ext.moe.MOE(config)
        gate_projs.append(gate_proj)
        up_projs.append(up_proj)
        down_projs.append(down_proj)
        moes.append(moe)

    # validation: compare the CPU operator output with the PyTorch reference
    for i in range(validation_iter):
        # random routing (n_routed_experts distinct experts per token), weights, and activations
        expert_ids = torch.stack([torch.randperm(expert_num)[:n_routed_experts] for _ in range(qlen)]).contiguous()
        weights = torch.rand((qlen, n_routed_experts), dtype=torch.float32).contiguous()
        input = torch.randn((qlen, hidden_size), dtype=torch.float16).contiguous()
        output = torch.empty((qlen, hidden_size), dtype=torch.float16).contiguous()
        input = input / 100  # keep activation magnitudes small for fp16

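        # Submit the MoE forward task to the CPUInfer worker pool, passing raw
        # pointers to the routing and activation buffers, then wait for it to finish.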
        moe = moes[i % layer_num]
        CPUInfer.submit(
            moe.forward(
                qlen,
                n_routed_experts,
                expert_ids.data_ptr(),
                weights.data_ptr(),
                input.data_ptr(),
                output.data_ptr()
            )
        )
        CPUInfer.sync()
        # print('cpuinfer output', output)

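        # Recompute the same layer with the PyTorch reference implementation.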
        gate_proj = gate_projs[i % layer_num]
        up_proj = up_projs[i % layer_num]
        down_proj = down_projs[i % layer_num]
        t_output = moe_torch(input, expert_ids, weights, gate_proj, up_proj, down_proj)
        # print('torch output', t_output)

        # relative mean absolute error between the CPU operator and the reference
        diff = torch.mean(torch.abs(output - t_output)) / torch.mean(torch.abs(t_output))
        print('diff = ', diff)
        assert diff < 0.001