Update DeepSeek-V3-Chat-multi-gpu-fp8-linear-ggml-experts.yaml
parent 3aee0fa099
commit c9a0c44213
1 changed file with 2 additions and 2 deletions
DeepSeek-V3-Chat-multi-gpu-fp8-linear-ggml-experts.yaml

@@ -66,7 +66,7 @@
     name: "^model\\.layers\\.(0|[1-9]|[12][0-9])\\.mlp\\.gate$"
     class: ktransformers.models.modeling_deepseek_v3.MoEGate
   replace:
-    class: ktransformers.operators.gate.KMoEGate
+    class: ktransformers.operators.gate.KMoEGateDeepSeekV3
     kwargs:
       generate_device: "cuda:0"
       prefill_device: "cuda:0"
@@ -74,7 +74,7 @@
     name: "^model\\.layers\\.([3456][0-9])\\.mlp\\.gate$"
     class: ktransformers.models.modeling_deepseek_v3.MoEGate
   replace:
-    class: ktransformers.operators.gate.KMoEGate # mlp module with custom forward function
+    class: ktransformers.operators.gate.KMoEGateDeepSeekV3 # mlp module with custom forward function
     kwargs:
       generate_device: "cuda:1"
       prefill_device: "cuda:1"
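The two rules split the MoE gate modules across GPUs by matching module names with a regex: the first rule covers layer indices 0-29 and places them on cuda:0, the second covers indices in the 30-69 range and places them on cuda:1. The commit only swaps the replacement class from KMoEGate to KMoEGateDeepSeekV3; the layer-to-device split is unchanged. Below is a minimal sketch (not part of the commit) that checks which layer indices each regex captures; the device_for_gate helper and the 61-layer count are illustrative assumptions, not ktransformers API.

# Sketch: verify which YAML rule (and hence which GPU) each gate module maps to.
import re
from typing import Optional

RULES = [
    # layers 0-29 -> cuda:0
    (r"^model\.layers\.(0|[1-9]|[12][0-9])\.mlp\.gate$", "cuda:0"),
    # layers 30-69 -> cuda:1
    (r"^model\.layers\.([3456][0-9])\.mlp\.gate$", "cuda:1"),
]

def device_for_gate(module_name: str) -> Optional[str]:
    """Return the device assigned by the first matching rule, or None."""
    for pattern, device in RULES:
        if re.match(pattern, module_name):
            return device
    return None

for layer in range(61):  # 61 decoder layers assumed here (DeepSeek-V3)
    name = f"model.layers.{layer}.mlp.gate"
    assert device_for_gate(name) == ("cuda:0" if layer < 30 else "cuda:1")
print("gate modules: layers 0-29 -> cuda:0, layers 30-60 -> cuda:1")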