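# Optimization rules for DeepseekV2: each entry matches modules by name (regex)
# and/or class and replaces them with a ktransformers operator.

# Replace the stock YaRN rotary embedding with the ktransformers implementation.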
- match:
    class: ktransformers.models.modeling_deepseek.DeepseekV2YarnRotaryEmbedding
  replace:
    class: ktransformers.operators.RoPE.YarnRotaryEmbedding
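# Route all Linear layers outside self-attention to quantized kernels:
# Marlin for generation and a Torch implementation for prefill, both on CUDA.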
- match:
    name: "^model\\.layers\\.(?!.*self_attn).*$"  # regular expression
    class: torch.nn.Linear  # only replace modules that match both name and class
  replace:
    class: ktransformers.operators.linear.KTransformerLinear  # optimized kernel on quantized data types
    kwargs:
      generate_device: "cuda"
      prefill_device: "cuda"
      generate_op: "QuantizedLinearMarlin"
      prefill_op: "QuantizedLinearTorch"
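# Swap the DeepseekV2MoE module for the injected version with a custom forward.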
- match:
    name: "^model\\.layers\\..*\\.mlp$"
    class: ktransformers.models.modeling_deepseek.DeepseekV2MoE
  replace:
    class: ktransformers.operators.experts.DeepseekV2MoEInjected  # MLP module with a custom forward function
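# Keep the MoE expert weights on CPU for generation (expert-parallel CPU
# kernel) and use a Torch implementation on CUDA for prefill; outputs are
# placed back on CUDA via out_device.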
- match:
    name: "^model\\.layers\\..*\\.mlp\\.experts$"
  replace:
    class: ktransformers.operators.experts.KTransformersMLPExpert  # custom MoE kernel with expert parallelism
    device: "cpu"  # which device to load this module onto at initialization
    kwargs:
      prefill_device: "cuda"
      prefill_mlp_type: "MLPExpertsTorch"
      generate_device: "cpu"
      generate_mlp_type: "MLPCPUExperts"
      out_device: "cuda"
  recursive: False  # don't recursively inject submodules of this module
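# Replace each self-attention block with the optimized MLA implementation.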
- match:
    name: "^model\\.layers\\..*\\.self_attn$"
  replace:
    class: ktransformers.operators.attention.DeepseekV2AttentionInjected  # optimized MLA implementation
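# Wrap the top-level model with the layer-wise prefill implementation.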
- match:
    name: "^model$"
  replace:
    class: "ktransformers.operators.layer_wise_prefill.DeepseekV2ModelPerLayerPrefill"
    kwargs:
      per_layer_prefill_intput_threshold: 0  # 0 disables layer-wise prefill