Mirror of https://github.com/kvcache-ai/ktransformers.git

Commit 18c42e67df: Initial commit
247 changed files with 53,775 additions and 0 deletions
ktransformers/optimize/optimize_rules/DeepSeek-V2-Chat.yaml (new file)
@@ -0,0 +1,41 @@
- match:
    class: ktransformers.models.modeling_deepseek.DeepseekV2YarnRotaryEmbedding
  replace:
    class: ktransformers.operators.RoPE.YarnRotaryEmbedding
- match:
    name: "^model\\.layers\\.(?!.*self_attn).*$"  # regular expression
    class: torch.nn.Linear  # match only modules whose name and class both match
  replace:
    class: ktransformers.operators.linear.KTransformerLinear  # optimized kernel for quantized data types
    kwargs:
      generate_device: "cuda"
      prefill_device: "cuda"
      generate_op: "QuantizedLinearMarlin"
      prefill_op: "QuantizedLinearTorch"
- match:
    name: "^model\\.layers\\..*\\.mlp$"
    class: ktransformers.models.modeling_deepseek.DeepseekV2MoE
  replace:
    class: ktransformers.operators.experts.DeepseekV2MoEInjected  # MLP module with a custom forward function
- match:
    name: "^model\\.layers\\..*\\.mlp\\.experts$"
  replace:
    class: ktransformers.operators.experts.KTransformersMLPExpert  # custom MoE kernel with expert parallelism
    device: "cpu"  # device this module is loaded onto at initialization
    kwargs:
      prefill_device: "cuda"
      prefill_mlp_type: "MLPExpertsTorch"
      generate_device: "cpu"
      generate_mlp_type: "MLPCPUExperts"
      out_device: "cuda"
  recursive: False  # don't recursively inject submodules of this module
- match:
    name: "^model\\.layers\\..*\\.self_attn$"
  replace:
    class: ktransformers.operators.attention.DeepseekV2AttentionInjected  # optimized MLA implementation
- match:
    name: "^model$"
  replace:
    class: "ktransformers.operators.layer_wise_prefill.DeepseekV2ModelPerLayerPrefill"
    kwargs:
      per_layer_prefill_intput_threshold: 0  # 0 disables layer-wise prefill
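These rules drive the injection framework: each match block selects modules by a name regex and/or a fully qualified class, and each replace block names the class to construct in its place, with kwargs forwarded to it. Below is a minimal Python sketch of how such match/replace rules can be applied to a PyTorch model. It is illustrative, not ktransformers' actual loader, and the build factory is a hypothetical stand-in for whatever instantiates the replacement class.

# A minimal sketch (an assumption, not ktransformers' actual loader) of how
# match/replace rules like the YAML above can drive module injection.
import re
import torch.nn as nn

def rule_matches(module: nn.Module, name: str, match: dict) -> bool:
    # A rule matches only if every listed criterion holds: the optional
    # "name" regex must match the module path, and the optional "class"
    # must equal the module's fully qualified class name.
    if "name" in match and re.match(match["name"], name) is None:
        return False
    if "class" in match:
        cls = type(module)
        if f"{cls.__module__}.{cls.__qualname__}" != match["class"]:
            return False
    return True

def inject(model: nn.Module, rules: list[dict], build) -> None:
    # build(rule, old_module) is a hypothetical factory that constructs the
    # class named in rule["replace"]["class"] with rule["replace"]["kwargs"].
    for name, module in list(model.named_modules()):
        if not name:  # skip the root module itself
            continue
        for rule in rules:
            if rule_matches(module, name, rule.get("match", {})):
                parent_name, _, child = name.rpartition(".")
                parent = model.get_submodule(parent_name) if parent_name else model
                setattr(parent, child, build(rule, module))
                break  # first matching rule wins (an assumption of this sketch)

The sketch also ignores recursive: False, which in the rules above stops the framework from descending into the submodules of an already-replaced module.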
Qwen2 MoE optimize rules (second new file)
@@ -0,0 +1,37 @@
- match:
    class: ktransformers.models.modeling_qwen2_moe.Qwen2MoeRotaryEmbedding
  replace:
    class: ktransformers.operators.RoPE.RotaryEmbedding
- match:
    name: "^model\\.layers\\..*$"  # regular expression
    class: torch.nn.Linear  # match only modules whose name and class both match
  replace:
    class: ktransformers.operators.linear.KTransformerLinear  # optimized kernel for quantized data types
    kwargs:
      generate_device: "cuda"
      prefill_device: "cuda"
      generate_op: "QuantizedLinearMarlin"
      prefill_op: "QuantizedLinearTorch"
- match:
    name: "^model\\.layers\\..*\\.mlp$"
    class: ktransformers.models.modeling_qwen2_moe.Qwen2MoeSparseMoeBlock
  replace:
    class: ktransformers.operators.experts.Qwen2MoeSparseMoeBlockInjected  # MLP module with a custom forward function
- match:
    name: "^model\\.layers\\..*\\.mlp\\.experts$"
  replace:
    class: ktransformers.operators.experts.KTransformersMLPExpert  # custom MoE kernel with expert parallelism
    device: "cpu"  # device this module is loaded onto at initialization
    kwargs:
      prefill_device: "cuda"
      prefill_mlp_type: "MLPExpertsTorch"
      generate_device: "cpu"
      generate_mlp_type: "MLPCPUExperts"
      out_device: "cuda"
  recursive: False  # don't recursively inject submodules of this module
- match:
    name: "^model$"
  replace:
    class: "ktransformers.operators.layer_wise_prefill.Qwen2MoeModelPerLayerPrefill"
    kwargs:
      per_layer_prefill_intput_threshold: 0  # 0 disables layer-wise prefill
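Both files share the same schema, so a rules file can be sanity-checked by loading it and compiling each name pattern. A small sketch, assuming PyYAML is available and using the DeepSeek-V2-Chat.yaml path from this commit as the example:

# A small sketch: load an optimize-rules file, fail fast on bad name regexes,
# and print the device placement each rule requests. Assumes PyYAML is
# installed; the file path is the DeepSeek example from this commit.
import re
import yaml

with open("ktransformers/optimize/optimize_rules/DeepSeek-V2-Chat.yaml") as f:
    rules = yaml.safe_load(f)

for rule in rules:
    match, replace = rule.get("match", {}), rule.get("replace", {})
    if "name" in match:
        re.compile(match["name"])  # raises re.error on an invalid pattern
    kwargs = replace.get("kwargs", {})
    target = match.get("name") or match.get("class", "?")
    print(f"{target} -> {replace.get('class', '?')} "
          f"(prefill: {kwargs.get('prefill_device', '-')}, "
          f"generate: {kwargs.get('generate_device', '-')})")

Printed this way, the placement strategy is visible at a glance: expert weights stay on CPU for decode (generate_device: "cpu"), take a CUDA path during prefill, and hand their outputs back on CUDA via out_device.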