- match:
    name: "^model\\.layers\\..*\\.|^lm_head"
  replace:
    class: "default"
    kwargs:
      generate_device: "cuda"
      prefill_device: "cuda"
- match:
    class: ktransformers.models.modeling_deepseek.DeepseekV2YarnRotaryEmbedding
  replace:
    class: ktransformers.operators.RoPE.YarnRotaryEmbedding
- match:
    name: "^model\\.layers\\.(?!.*self_attn).*$"  # regular expression
    class: torch.nn.Linear  # only match modules that satisfy both the name and the class
  replace:
    class: ktransformers.operators.linear.KTransformerLinear  # optimized kernel on quantized data types
    kwargs:
      generate_device: "cuda"
      prefill_device: "cuda"
      generate_op: "QuantizedLinearMarlin"
      prefill_op: "QuantizedLinearTorch"
- match:
    name: "^model\\.layers\\..*\\.mlp$"
    class: ktransformers.models.modeling_deepseek.DeepseekV2MoE
  replace:
    class: ktransformers.operators.experts.DeepseekV2MoEInjected  # MLP module with a custom forward function
- match:
    name: "^model\\.layers\\..*\\.mlp\\.experts$"
  replace:
    class: ktransformers.operators.experts.KTransformersMLPExpert  # custom MoE kernel with expert parallelism
    kwargs:
      prefill_device: "cuda"
      prefill_mlp_type: "MLPExpertsTorch"
      generate_device: "cpu"
      generate_mlp_type: "MLPCPUExperts"
      out_device: "cuda"
  recursive: False  # don't recursively inject submodules of this module
- match:
    name: "^model\\.layers\\..*\\.self_attn$"
  replace:
    class: ktransformers.operators.attention.DeepseekV2AttentionInjected  # optimized MLA implementation
- match:
    name: "^model$"
  replace:
    class: "ktransformers.operators.layer_wise_prefill.DeepseekV2ModelKTransformers"
    kwargs:
      per_layer_prefill_intput_threshold: 0  # 0 disables layer-wise prefill
- match:
    name: "^model.embed_tokens"
  replace:
    class: "default"
    kwargs:
      generate_device: "cpu"
      prefill_device: "cpu"
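Each rule pairs a `match` filter (a name regex, a class, or both) with a `replace` directive naming the module to inject in its place, plus optional `kwargs` and a `recursive` flag. To make the mechanics concrete, here is a minimal, hypothetical sketch of the rule-application loop such a file implies. The names `resolve` and `apply_rules`, the first-match-wins policy, and the wrapper-style constructor `NewClass(orig_module, **kwargs)` are illustrative assumptions, not the actual ktransformers loader, which also threads through the GGUF weights and model config and honors the per-rule `recursive` flag.

```python
# Hypothetical sketch of a match/replace injection pass over a module tree.
import importlib
import re

import torch


def resolve(path: str):
    """Turn a dotted 'package.module.ClassName' string into the class object."""
    module_name, _, class_name = path.rpartition(".")
    return getattr(importlib.import_module(module_name), class_name)


def apply_rules(model: torch.nn.Module, rules: list) -> None:
    """Rewrite the module tree in place; the first matching rule claims a module."""
    for name, module in list(model.named_modules()):
        if not name:
            continue  # the root module is handled by its own "^model$"-style rule
        for rule in rules:
            match = rule["match"]
            if "name" in match and not re.search(match["name"], name):
                continue
            if "class" in match and type(module) is not resolve(match["class"]):
                continue
            replace = rule["replace"]
            if replace["class"] == "default":
                break  # keep the original module; kwargs only pin its devices
            # Assumed convention: the injected class wraps the original module.
            new = resolve(replace["class"])(module, **replace.get("kwargs", {}))
            parent_name, _, child = name.rpartition(".")
            parent = model.get_submodule(parent_name) if parent_name else model
            setattr(parent, child, new)
            break  # stop at the first matching rule for this module


class LoggingLinear(torch.nn.Module):
    """Stand-in 'optimized kernel' for the demo: wraps a Linear unchanged."""

    def __init__(self, orig, generate_device="cpu", prefill_device="cpu"):
        super().__init__()
        self.orig = orig
        self.generate_device = generate_device
        self.prefill_device = prefill_device

    def forward(self, x):
        return self.orig(x)


if __name__ == "__main__":
    # Toy model and one rule shaped like the YAML entries above.
    model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
    rules = [{
        "match": {"name": "^0$", "class": "torch.nn.Linear"},
        "replace": {"class": "__main__.LoggingLinear",
                    "kwargs": {"generate_device": "cpu", "prefill_device": "cpu"}},
    }]
    apply_rules(model, rules)
    print(model)  # index 0 is now a LoggingLinear wrapping the original Linear
```

The first-match-wins policy above is a design assumption of the sketch: under it, a broad catch-all rule like the leading `"default"` device rule would shadow the more specific entries after it, so a real loader must resolve such overlaps (and skip descending into modules injected with `recursive: False`); those details are deliberately left out here.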