Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2026-05-17 04:09:19 +00:00).
Commit: move conversion code to a dedicated conversion directory and split the files akin to the src/models architecture. Co-authored-by: Sigbjørn Skjæret <sigbjorn.skjaeret@scala.com>
File stats: 33 lines, 933 B, Python.
from __future__ import annotations
|
|
|
|
from typing import Callable, TYPE_CHECKING
|
|
|
|
if TYPE_CHECKING:
|
|
from torch import Tensor
|
|
|
|
from .base import MmprojModel, ModelBase, gguf
|
|
|
|
from .llama import LlamaModel
|
|
|
|
|
|
@ModelBase.register("CogVLMForCausalLM")
class CogVLMVisionModel(MmprojModel):
    """Vision (mmproj) half of the CogVLM conversion.

    Keeps only the ``model.vision.*`` tensors and emits the vision/CLIP
    GGUF metadata for the COGVLM projector type.
    """

    def set_gguf_parameters(self):
        """Write vision-specific GGUF parameters on top of the base ones."""
        super().set_gguf_parameters()
        # Fall back to 1e-6 when the HF config omits layer_norm_eps.
        self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams.get("layer_norm_eps", 1e-6))
        self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.COGVLM)

    @classmethod
    def filter_tensors(cls, item: tuple[str, Callable[[], Tensor]]) -> tuple[str, Callable[[], Tensor]] | None:
        """Drop every tensor that is not part of the vision tower.

        Returns ``None`` for non-vision tensors; otherwise defers to the
        base-class filter.
        """
        # Only the tensor name is inspected here; the lazy generator in
        # item[1] is left untouched (original bound it to an unused local).
        name, _ = item
        if not name.startswith("model.vision."):
            return None
        return super().filter_tensors(item)
|
|
|
|
|
|
@ModelBase.register("CogVLMForCausalLM")
class CogVLMModel(LlamaModel):
    # Language half of the CogVLM conversion: reuses LlamaModel's conversion
    # logic and only retargets the GGUF architecture identifier.
    model_arch = gguf.MODEL_ARCH.COGVLM
|