From 83bc2f288c0e08e676d9beca9c4669197e920593 Mon Sep 17 00:00:00 2001
From: Gabriel Larson <55459720+gabriellarson@users.noreply.github.com>
Date: Sun, 3 Aug 2025 09:56:25 -0500
Subject: [PATCH] model : add text-only support for Kimi-VL (and find special tokens in text_config) (#15051)

* basic kimi-vl textmodel conversion

* check config["text_config"] for special tokens
---
 convert_hf_to_gguf.py | 8 ++++++++
 gguf-py/gguf/vocab.py | 6 +++++-
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index f13f8558b..5f15c8257 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -6059,6 +6059,7 @@ class DeepseekModel(TextModel):

 @ModelBase.register("DeepseekV2ForCausalLM")
 @ModelBase.register("DeepseekV3ForCausalLM")
+@ModelBase.register("KimiVLForConditionalGeneration")
 class DeepseekV2Model(TextModel):
     model_arch = gguf.MODEL_ARCH.DEEPSEEK2

@@ -6161,6 +6162,13 @@ class DeepseekV2Model(TextModel):
     _experts: list[dict[str, Tensor]] | None = None

     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        # skip vision tensors and remove "language_model." for Kimi-VL
+        if "vision_tower" in name or "multi_modal_projector" in name:
+            return []
+
+        if name.startswith("language_model."):
+            name = name.replace("language_model.", "")
+
         # rename e_score_correction_bias tensors
         if name.endswith("e_score_correction_bias"):
             name = name.replace("e_score_correction_bias", "e_score_correction.bias")
diff --git a/gguf-py/gguf/vocab.py b/gguf-py/gguf/vocab.py
index e1d5aaf47..7111557bf 100644
--- a/gguf-py/gguf/vocab.py
+++ b/gguf-py/gguf/vocab.py
@@ -312,7 +312,11 @@ class SpecialVocab:
         with open(config_file, encoding = 'utf-8') as f:
             config = json.load(f)
         for typ in self.special_token_types:
-            self._set_special_token(typ, config.get(f'{typ}_token_id'))
+            token_id = config.get(f'{typ}_token_id')
+            # If not found at root, check in text_config (for multimodal models like Kimi-VL)
+            if token_id is None and 'text_config' in config:
+                token_id = config['text_config'].get(f'{typ}_token_id')
+            self._set_special_token(typ, token_id)
         return True
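
For reference, here is a minimal standalone sketch (not part of the patch) of the vocab.py fallback added above: look up a special token id at the root of a loaded config.json and, if absent, fall back to text_config. The helper name and the example config dicts (including the token id values) are hypothetical, chosen only for illustration.

```python
def find_special_token_id(config: dict, typ: str) -> int | None:
    """Return '<typ>_token_id' from the root config, falling back to text_config."""
    token_id = config.get(f'{typ}_token_id')
    # Multimodal configs (e.g. Kimi-VL) may nest the text model's settings
    # under "text_config" instead of the root.
    if token_id is None and 'text_config' in config:
        token_id = config['text_config'].get(f'{typ}_token_id')
    return token_id


if __name__ == '__main__':
    # Hypothetical minimal configs for illustration only.
    text_only = {'bos_token_id': 1, 'eos_token_id': 2}
    multimodal = {'text_config': {'bos_token_id': 100, 'eos_token_id': 101}}

    print(find_special_token_id(text_only, 'bos'))    # 1
    print(find_special_token_id(multimodal, 'eos'))   # 101 (found via text_config)
    print(find_special_token_id(multimodal, 'pad'))   # None (not defined anywhere)
```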