Mirror of https://github.com/kvcache-ai/ktransformers.git, synced 2025-09-10 15:29:39 +00:00
add XPU support for qwen3moe local chat
This commit is contained in:
parent: 25893366b6
commit: adc0906967
9 changed files with 223 additions and 25 deletions
ktransformers/optimize/optimize_rules/xpu/Qwen3Moe-Chat.yaml (new file, 80 lines added)
@@ -0,0 +1,80 @@
- match:
    name: "rotary_emb$"
  replace:
    class: ktransformers.operators.RoPE.KQwen3MoeRotaryEmbedding
    kwargs:
      generate_device: "xpu"
      prefill_device: "xpu"
- match:
    name: "^lm_head$"  # regular expression
    class: torch.nn.Linear  # only match modules matching name and class simultaneously
  replace:
    class: ktransformers.operators.linear.KTransformersLinear  # optimized kernel on quantized data types
    kwargs:
      generate_device: "xpu"
      prefill_device: "xpu"
      generate_op: "KLinearIPEXLLM"
      prefill_op: "KLinearIPEXLLM"
- match:
    name: "^model\\.layers\\.(?!.*mlp\\.gate).*$"  # regular expression
    class: torch.nn.Linear  # only match modules matching name and class simultaneously
  replace:
    class: ktransformers.operators.linear.KTransformersLinear  # optimized kernel on quantized data types
    kwargs:
      generate_device: "xpu"
      prefill_device: "xpu"
      generate_op: "KLinearIPEXLLM"
      prefill_op: "KLinearIPEXLLM"
- match:
    name: "^model\\.layers\\..*\\.mlp$"
    class: transformers.models.qwen3_moe.modeling_qwen3_moe.Qwen3MoeSparseMoeBlock
  replace:
    class: ktransformers.operators.experts.KQwen3MoeSparseMoeBlockV2  # MLP module with a custom forward function
    kwargs:
      generate_device: "xpu"
      prefill_device: "xpu"
- match:
    name: "^model\\.layers\\..*\\.mlp\\.experts$"
  replace:
    class: ktransformers.operators.experts.KTransformersExpertsV2  # custom MoE kernel with expert parallelism
    kwargs:
      prefill_device: "xpu"
      prefill_op: "KExpertsTorch"
      generate_device: "cpu"
      generate_op: "KExpertsCPU"
      out_device: "xpu"
  recursive: False  # don't recursively inject submodules of this module
- match:
    name: "^model\\.layers\\..*\\.self_attn$"
  replace:
    class: ktransformers.operators.attention.KQwen3MoeAttentionIPEXLLM
    kwargs:
      generate_device: "xpu"
      prefill_device: "xpu"
- match:
    name: "^model$"
  replace:
    class: "ktransformers.operators.models.KQwen2MoeModel"
    kwargs:
      per_layer_prefill_intput_threshold: 0  # 0 turns layer-wise prefill off
- match:
    name: "^model.embed_tokens"
  replace:
    class: "default"
    kwargs:
      generate_device: "cpu"
      prefill_device: "cpu"
- match:
    class: transformers.models.qwen3_moe.modeling_qwen3_moe.Qwen3MoeRMSNorm
  replace:
    class: ktransformers.operators.layernorm.KDeepseekRMSNormIPEXLLM
    kwargs:
      generate_device: "xpu"
      prefill_device: "xpu"
- match:
    class: transformers.models.qwen3_moe.modeling_qwen3_moe.Qwen3MoeMLP
  replace:
    class: ktransformers.operators.mlp.KQwen2MoeMLP
    kwargs:
      generate_device: "xpu"
      prefill_device: "xpu"
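For context, a rule file like the one above is consumed by ktransformers' optimizer, which walks the model's module tree, matches each rule by name regex and/or class, and replaces the matched module with the rule's replacement class constructed from its kwargs. The sketch below is a minimal, hypothetical illustration of that match-and-replace mechanism, not the actual ktransformers implementation: the helper names (apply_rules, OptimizedLinear, TinyModel) are made up for the example, and only Linear layers are swapped.

# Minimal sketch (assumed behaviour, not the ktransformers implementation) of how
# a rule file like the one above is applied: match modules by name regex and/or
# class, then replace the match with an "optimized" wrapper built from the rule's
# kwargs. Helper names (OptimizedLinear, apply_rules, TinyModel) are hypothetical.
import re
import yaml
import torch.nn as nn


class OptimizedLinear(nn.Module):
    # Stand-in for a replacement such as KTransformersLinear; a real kernel would
    # dispatch to IPEX-LLM on the XPU, while this one just delegates to the original.
    def __init__(self, orig: nn.Linear, generate_device="xpu", prefill_device="xpu", **_):
        super().__init__()
        self.orig = orig
        self.generate_device = generate_device
        self.prefill_device = prefill_device

    def forward(self, x):
        return self.orig(x)


def apply_rules(model: nn.Module, rules: list) -> None:
    for name, module in list(model.named_modules()):
        for rule in rules:
            m = rule.get("match", {})
            if "name" in m and not re.search(m["name"], name):
                continue
            if "class" in m and m["class"].rsplit(".", 1)[-1] != type(module).__name__:
                continue
            if not isinstance(module, nn.Linear):
                continue  # this toy version only knows how to swap Linear layers
            parent_name, _, child = name.rpartition(".")
            parent = model.get_submodule(parent_name) if parent_name else model
            setattr(parent, child, OptimizedLinear(module, **rule["replace"].get("kwargs", {})))
            break  # first matching rule wins


class TinyModel(nn.Module):
    # Placeholder model; the real target is the Qwen3-MoE checkpoint.
    def __init__(self):
        super().__init__()
        self.lm_head = nn.Linear(8, 8)


if __name__ == "__main__":
    with open("Qwen3Moe-Chat.yaml") as f:  # the file added in this commit
        rules = yaml.safe_load(f)
    model = TinyModel()
    apply_rules(model, rules)
    print(type(model.lm_head).__name__)  # -> OptimizedLinear (matched the "^lm_head$" rule)

Note the one place in the rule set where prefill and generation are split across devices: the expert modules run KExpertsTorch on the XPU during prefill but KExpertsCPU on the CPU during generation, with out_device: "xpu" moving the expert outputs back to the XPU for the rest of the layer.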