- match:
    name: "^model.embed_tokens"
  replace:
    class: "default"
    kwargs:
      generate_device: "cpu"
      prefill_device: "cpu"
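
# Each rule below follows the same match/replace schema: `match` selects
# modules by a regex over the module name (and optionally by class), and
# `replace` injects the named class, passing it `kwargs` such as the
# devices to use for prefill and decode. A minimal sketch, with a
# hypothetical target that is not one of this file's rules:
#
# - match:
#     name: "^model\\.layers\\.0\\.mlp$"  # regex over module names
#   replace:
#     class: "default"                    # keep the module, only pin devices
#     kwargs:
#       generate_device: "cuda:0"         # device used during decode
#       prefill_device: "cuda:0"          # device used during prefill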

# === Rotary Embedding Replacement ===

# GPU 0: layers 0–14
- match:
    name: "^model\\.layers\\.([0-9]|1[0-4])\\."
    class: ktransformers.models.modeling_deepseek_v3.DeepseekV3RotaryEmbedding
  replace:
    class: ktransformers.operators.RoPE.YarnRotaryEmbeddingV3
    kwargs:
      generate_device: "cuda:0"
      prefill_device: "cuda:0"

# GPU 1: layers 15–29
- match:
    name: "^model\\.layers\\.(1[5-9]|2[0-9])\\."
    class: ktransformers.models.modeling_deepseek_v3.DeepseekV3RotaryEmbedding
  replace:
    class: ktransformers.operators.RoPE.YarnRotaryEmbeddingV3
    kwargs:
      generate_device: "cuda:1"
      prefill_device: "cuda:1"

# GPU 2: layers 30–44
- match:
    name: "^model\\.layers\\.(3[0-9]|4[0-4])\\."
    class: ktransformers.models.modeling_deepseek_v3.DeepseekV3RotaryEmbedding
  replace:
    class: ktransformers.operators.RoPE.YarnRotaryEmbeddingV3
    kwargs:
      generate_device: "cuda:2"
      prefill_device: "cuda:2"

# GPU 3: layers 45–60
- match:
    name: "^model\\.layers\\.(4[5-9]|5[0-9]|60)\\."
    class: ktransformers.models.modeling_deepseek_v3.DeepseekV3RotaryEmbedding
  replace:
    class: ktransformers.operators.RoPE.YarnRotaryEmbeddingV3
    kwargs:
      generate_device: "cuda:3"
      prefill_device: "cuda:3"
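
# The four layer-range regexes used throughout this file are meant to
# partition DeepSeek-V3's 61 decoder layers (0–60) as 15 + 15 + 15 + 16.
# A quick sanity check, as illustrative Python rather than part of this
# config:
#
#   import re
#   ranges = {
#       "cuda:0": r"^model\.layers\.([0-9]|1[0-4])\.",
#       "cuda:1": r"^model\.layers\.(1[5-9]|2[0-9])\.",
#       "cuda:2": r"^model\.layers\.(3[0-9]|4[0-4])\.",
#       "cuda:3": r"^model\.layers\.(4[5-9]|5[0-9]|60)\.",
#   }
#   for i in range(61):
#       hits = [d for d, p in ranges.items() if re.match(p, f"model.layers.{i}.")]
#       assert len(hits) == 1, f"layer {i} matched {hits}"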

# === Linear Layers Replacement (excluding self_attn.kv_b_proj) ===

# GPU 0: layers 0–14
- match:
    name: "^model\\.layers\\.([0-9]|1[0-4])\\.(?!self_attn\\.kv_b_proj).*$"
    class: torch.nn.Linear
  replace:
    class: ktransformers.operators.linear.KTransformersLinear
    kwargs:
      generate_device: "cuda:0"
      prefill_device: "cuda:0"
      generate_op: "KLinearMarlin"
      prefill_op: "KLinearTorch"

# GPU 1: layers 15–29
- match:
    name: "^model\\.layers\\.(1[5-9]|2[0-9])\\.(?!self_attn\\.kv_b_proj).*$"
    class: torch.nn.Linear
  replace:
    class: ktransformers.operators.linear.KTransformersLinear
    kwargs:
      generate_device: "cuda:1"
      prefill_device: "cuda:1"
      generate_op: "KLinearMarlin"
      prefill_op: "KLinearTorch"

# GPU 2: layers 30–44
- match:
    name: "^model\\.layers\\.(3[0-9]|4[0-4])\\.(?!self_attn\\.kv_b_proj).*$"
    class: torch.nn.Linear
  replace:
    class: ktransformers.operators.linear.KTransformersLinear
    kwargs:
      generate_device: "cuda:2"
      prefill_device: "cuda:2"
      generate_op: "KLinearMarlin"
      prefill_op: "KLinearTorch"

# GPU 3: layers 45–60
- match:
    name: "^model\\.layers\\.(4[5-9]|5[0-9]|60)\\.(?!self_attn\\.kv_b_proj).*$"
    class: torch.nn.Linear
  replace:
    class: ktransformers.operators.linear.KTransformersLinear
    kwargs:
      generate_device: "cuda:3"
      prefill_device: "cuda:3"
      generate_op: "KLinearMarlin"
      prefill_op: "KLinearTorch"
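
# Note: the negative lookahead `(?!self_attn\.kv_b_proj)` exempts each
# layer's `self_attn.kv_b_proj` from the Marlin replacement, presumably so
# the custom attention operator can keep consuming that weight in its
# original layout. For example, "model.layers.0.self_attn.q_a_proj" is
# replaced, while "model.layers.0.self_attn.kv_b_proj" is not.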

# === MLP (MoE) Replacement ===

# GPU 0: layers 0–14
- match:
    name: "^model\\.layers\\.([0-9]|1[0-4])\\.mlp$"
    class: ktransformers.models.modeling_deepseek_v3.DeepseekV3MoE
  replace:
    class: ktransformers.operators.experts.KDeepseekV3MoE
    kwargs:
      generate_device: "cuda:0"
      prefill_device: "cuda:0"

# GPU 1: layers 15–29
- match:
    name: "^model\\.layers\\.(1[5-9]|2[0-9])\\.mlp$"
    class: ktransformers.models.modeling_deepseek_v3.DeepseekV3MoE
  replace:
    class: ktransformers.operators.experts.KDeepseekV3MoE
    kwargs:
      generate_device: "cuda:1"
      prefill_device: "cuda:1"

# GPU 2: layers 30–44
- match:
    name: "^model\\.layers\\.(3[0-9]|4[0-4])\\.mlp$"
    class: ktransformers.models.modeling_deepseek_v3.DeepseekV3MoE
  replace:
    class: ktransformers.operators.experts.KDeepseekV3MoE
    kwargs:
      generate_device: "cuda:2"
      prefill_device: "cuda:2"

# GPU 3: layers 45–60
- match:
    name: "^model\\.layers\\.(4[5-9]|5[0-9]|60)\\.mlp$"
    class: ktransformers.models.modeling_deepseek_v3.DeepseekV3MoE
  replace:
    class: ktransformers.operators.experts.KDeepseekV3MoE
    kwargs:
      generate_device: "cuda:3"
      prefill_device: "cuda:3"

# === MLP Gate Replacement ===

# GPU 0: layers 0–14
- match:
    name: "^model\\.layers\\.([0-9]|1[0-4])\\.mlp\\.gate$"
    class: ktransformers.models.modeling_deepseek_v3.MoEGate
  replace:
    class: ktransformers.operators.gate.KMoEGate
    kwargs:
      generate_device: "cuda:0"
      prefill_device: "cuda:0"

# GPU 1: layers 15–29
- match:
    name: "^model\\.layers\\.(1[5-9]|2[0-9])\\.mlp\\.gate$"
    class: ktransformers.models.modeling_deepseek_v3.MoEGate
  replace:
    class: ktransformers.operators.gate.KMoEGate
    kwargs:
      generate_device: "cuda:1"
      prefill_device: "cuda:1"

# GPU 2: layers 30–44
- match:
    name: "^model\\.layers\\.(3[0-9]|4[0-4])\\.mlp\\.gate$"
    class: ktransformers.models.modeling_deepseek_v3.MoEGate
  replace:
    class: ktransformers.operators.gate.KMoEGate
    kwargs:
      generate_device: "cuda:2"
      prefill_device: "cuda:2"

# GPU 3: layers 45–60
- match:
    name: "^model\\.layers\\.(4[5-9]|5[0-9]|60)\\.mlp\\.gate$"
    class: ktransformers.models.modeling_deepseek_v3.MoEGate
  replace:
    class: ktransformers.operators.gate.KMoEGate
    kwargs:
      generate_device: "cuda:3"
      prefill_device: "cuda:3"

# === MLP Experts Replacement (GPU Marlin variant, commented out) ===
# Replaces experts with Marlin experts. Uncomment and adjust the layer
# numbers as needed.
# Each layer of Marlin experts takes about 6 GB of GPU memory.
# !!! Remember to disable CUDA graph when using Marlin experts. !!!
# !!! KExpertsTorch is untested; we don't have enough VRAM. !!!
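
# Rough budget at the ~6 GB/layer figure above: the example rules below
# enable only 2–3 expert layers per GPU, i.e. roughly 12–18 GB per card for
# Marlin experts alone, which is why just a few layers are listed rather
# than every MoE layer.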

# GPU 0: layers 3–4
# - match:
#     name: "^model\\.layers\\.([3-4])\\.mlp\\.experts$"
#   replace:
#     class: ktransformers.operators.experts.KTransformersExperts
#     kwargs:
#       generate_device: "cuda:0"
#       generate_op: "KExpertsMarlin"
#   recursive: False

# GPU 1: layers 15–17
# - match:
#     name: "^model\\.layers\\.(1[5-7])\\.mlp\\.experts$"
#   replace:
#     class: ktransformers.operators.experts.KTransformersExperts
#     kwargs:
#       generate_device: "cuda:1"
#       generate_op: "KExpertsMarlin"
#   recursive: False

# GPU 2: layers 30–32
# - match:
#     name: "^model\\.layers\\.(3[0-2])\\.mlp\\.experts$"
#   replace:
#     class: ktransformers.operators.experts.KTransformersExperts
#     kwargs:
#       generate_device: "cuda:2"
#       generate_op: "KExpertsMarlin"
#   recursive: False

# GPU 3: layers 45–46
# - match:
#     name: "^model\\.layers\\.(4[5-6])\\.mlp\\.experts$"
#   replace:
#     class: ktransformers.operators.experts.KTransformersExperts
#     kwargs:
#       generate_device: "cuda:3"
#       generate_op: "KExpertsMarlin"
#   recursive: False

# === MLP Experts Replacement (CPU experts, active) ===

# GPU 0: layers 0–14
- match:
    name: "^model\\.layers\\.([0-9]|1[0-4])\\.mlp\\.experts$"
  replace:
    class: ktransformers.operators.experts.KTransformersExperts
    kwargs:
      prefill_device: "cuda:0"
      prefill_op: "KExpertsTorch"
      generate_device: "cpu"
      generate_op: "KExpertsCPU"
      out_device: "cuda:0"
  recursive: False

# GPU 1: layers 15–29
- match:
    name: "^model\\.layers\\.(1[5-9]|2[0-9])\\.mlp\\.experts$"
  replace:
    class: ktransformers.operators.experts.KTransformersExperts
    kwargs:
      prefill_device: "cuda:1"
      prefill_op: "KExpertsTorch"
      generate_device: "cpu"
      generate_op: "KExpertsCPU"
      out_device: "cuda:1"
  recursive: False

# GPU 2: layers 30–44
- match:
    name: "^model\\.layers\\.(3[0-9]|4[0-4])\\.mlp\\.experts$"
  replace:
    class: ktransformers.operators.experts.KTransformersExperts
    kwargs:
      prefill_device: "cuda:2"
      prefill_op: "KExpertsTorch"
      generate_device: "cpu"
      generate_op: "KExpertsCPU"
      out_device: "cuda:2"
  recursive: False

# GPU 3: layers 45–60
- match:
    name: "^model\\.layers\\.(4[5-9]|5[0-9]|60)\\.mlp\\.experts$"
  replace:
    class: ktransformers.operators.experts.KTransformersExperts
    kwargs:
      prefill_device: "cuda:3"
      prefill_op: "KExpertsTorch"
      generate_device: "cpu"
      generate_op: "KExpertsCPU"
      out_device: "cuda:3"
  recursive: False
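
# In the active rules above, experts compute on the CPU during decode
# (KExpertsCPU) and on the layer's own GPU during prefill (KExpertsTorch);
# `out_device` places the experts' output on that same GPU so the rest of
# the layer can consume it directly, and `recursive: False` stops the
# injector from descending into the experts' submodules.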

# === Self-Attention Replacement ===

# GPU 0: layers 0–14
- match:
    name: "^model\\.layers\\.([0-9]|1[0-4])\\.self_attn$"
  replace:
    class: ktransformers.operators.attention.KDeepseekV2Attention
    kwargs:
      generate_device: "cuda:0"
      prefill_device: "cuda:0"
      absorb_for_prefill: False

# GPU 1: layers 15–29
- match:
    name: "^model\\.layers\\.(1[5-9]|2[0-9])\\.self_attn$"
  replace:
    class: ktransformers.operators.attention.KDeepseekV2Attention
    kwargs:
      generate_device: "cuda:1"
      prefill_device: "cuda:1"
      absorb_for_prefill: False

# GPU 2: layers 30–44
- match:
    name: "^model\\.layers\\.(3[0-9]|4[0-4])\\.self_attn$"
  replace:
    class: ktransformers.operators.attention.KDeepseekV2Attention
    kwargs:
      generate_device: "cuda:2"
      prefill_device: "cuda:2"
      absorb_for_prefill: False

# GPU 3: layers 45–60
- match:
    name: "^model\\.layers\\.(4[5-9]|5[0-9]|60)\\.self_attn$"
  replace:
    class: ktransformers.operators.attention.KDeepseekV2Attention
    kwargs:
      generate_device: "cuda:3"
      prefill_device: "cuda:3"
      absorb_for_prefill: False
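
# `absorb_for_prefill: False` keeps the standard MLA prefill path here;
# upstream configs note that setting it to True enables the
# matrix-absorption optimization for long-context prefill.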

# === Overall Model Replacement with Transfer Map ===

- match:
    name: "^model$"
  replace:
    class: "ktransformers.operators.models.KDeepseekV2Model"
    kwargs:
      per_layer_prefill_intput_threshold: 0 # 0 disables layer-wise prefill
      transfer_map:
        15: "cuda:1" # layers 15+ on GPU 1
        30: "cuda:2" # layers 30+ on GPU 2
        45: "cuda:3" # layers 45+ on GPU 3
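
# `transfer_map` tells the injected model where to hand hidden states off
# at each pipeline boundary: layers 0–14 run on the default device
# (cuda:0), then activations move to cuda:1 at layer 15, cuda:2 at layer
# 30, and cuda:3 at layer 45, matching the per-layer rules above.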

# === Default Catch-All for Other Modules ===

# GPU 0: layers 0–14
- match:
    name: "^model\\.layers\\.([0-9]|1[0-4])\\."
  replace:
    class: "default"
    kwargs:
      generate_device: "cuda:0"
      prefill_device: "cuda:0"

# GPU 1: layers 15–29
- match:
    name: "^model\\.layers\\.(1[5-9]|2[0-9])\\."
  replace:
    class: "default"
    kwargs:
      generate_device: "cuda:1"
      prefill_device: "cuda:1"

# GPU 2: layers 30–44
- match:
    name: "^model\\.layers\\.(3[0-9]|4[0-4])\\."
  replace:
    class: "default"
    kwargs:
      generate_device: "cuda:2"
      prefill_device: "cuda:2"

# === LM Head Replacement ===

- match:
    name: "^lm_head"
    class: torch.nn.Linear
  replace:
    class: ktransformers.operators.linear.KTransformersLinear
    kwargs:
      generate_device: "cuda:3"
      prefill_device: "cuda:3"
      generate_op: "KLinearMarlin"
      prefill_op: "KLinearTorch"

# GPU 3: layers 45–60, plus the final model.norm
- match:
    name: "(^model\\.layers\\.(4[5-9]|5[0-9]|60)\\.)|(^model\\.norm)"
  replace:
    class: "default"
    kwargs:
      generate_device: "cuda:3"
      prefill_device: "cuda:3"