temp merge, not working

Concedo 2025-05-03 11:42:01 +08:00
commit 0951ad9f58
21 changed files with 2802 additions and 1308 deletions

View file

@@ -455,8 +455,12 @@ class ModelBase:
class TextModel(ModelBase):
model_type = ModelType.TEXT
hf_arch: str
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.hf_arch = get_model_architecture(self.hparams, self.model_type)
if "text_config" in self.hparams:
# move the text_config to the root level
@@ -506,7 +510,7 @@ class TextModel(ModelBase):
def set_gguf_parameters(self):
self.gguf_writer.add_block_count(self.block_count)
if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx"], optional=True)) is not None:
if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx", "n_positions"], optional=True)) is not None:
self.gguf_writer.add_context_length(n_ctx)
logger.info(f"gguf: context length = {n_ctx}")
@@ -1075,10 +1079,36 @@ class TextModel(ModelBase):
if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_EOS)) is not None:
self.gguf_writer.add_add_eos_token(field.parts[-1].tolist()[0])
def _try_set_pooling_type(self) -> None:
# get pooling path
pooling_path = None
module_path = self.dir_model / "modules.json"
if module_path.is_file():
with open(module_path, encoding="utf-8") as f:
modules = json.load(f)
for mod in modules:
if mod["type"] == "sentence_transformers.models.Pooling":
pooling_path = mod["path"]
break
# get pooling type
if pooling_path is not None:
with open(self.dir_model / pooling_path / "config.json", encoding="utf-8") as f:
pooling = json.load(f)
if pooling["pooling_mode_mean_tokens"]:
pooling_type = gguf.PoolingType.MEAN
elif pooling["pooling_mode_cls_token"]:
pooling_type = gguf.PoolingType.CLS
elif pooling["pooling_mode_lasttoken"]:
pooling_type = gguf.PoolingType.LAST
else:
raise NotImplementedError("Only MEAN, CLS, and LAST pooling types supported")
self.gguf_writer.add_pooling_type(pooling_type)
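For reference, this helper follows the usual sentence-transformers layout: a modules.json listing the pipeline modules, plus a config.json inside the pooling module's directory. A minimal sketch of what it reads (file names follow the sentence-transformers convention; the values are illustrative, not taken from any particular model):

# modules.json: list of pipeline modules; the Pooling entry supplies pooling_path
modules = [
    {"idx": 0, "name": "0", "path": "", "type": "sentence_transformers.models.Transformer"},
    {"idx": 1, "name": "1", "path": "1_Pooling", "type": "sentence_transformers.models.Pooling"},
]
# 1_Pooling/config.json: exactly one mode flag set; this one maps to gguf.PoolingType.MEAN
pooling = {
    "pooling_mode_cls_token": False,
    "pooling_mode_mean_tokens": True,
    "pooling_mode_lasttoken": False,
}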
class VisionModel(ModelBase):
model_type = ModelType.VISION
model_arch = gguf.MODEL_ARCH.CLIP_VISION
n_text_embd = 0
preprocessor_config: dict[str, Any]
global_config: dict[str, Any]
@@ -1089,6 +1119,8 @@ class VisionModel(ModelBase):
raise TypeError("VisionModel must be subclassed with model_arch = gguf.MODEL_ARCH.CLIP_VISION")
# get n_embd of the text model
if "text_config" not in self.hparams:
self.hparams["text_config"] = {}
text_config = {**self.hparams, **self.hparams["text_config"]}
self.n_embd_text = text_config.get("hidden_size", text_config.get("n_embd", 0))
assert self.n_embd_text > 0, "n_embd not found in hparams"
@@ -2540,7 +2572,7 @@ class QwenModel(TextModel):
self.gguf_writer.add_file_type(self.ftype)
@ModelBase.register("Qwen2ForCausalLM")
@ModelBase.register("Qwen2Model", "Qwen2ForCausalLM")
class Qwen2Model(TextModel):
model_arch = gguf.MODEL_ARCH.QWEN2
@@ -2552,12 +2584,18 @@ class Qwen2Model(TextModel):
def set_gguf_parameters(self):
super().set_gguf_parameters()
self._try_set_pooling_type()
if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
if self.hparams["rope_scaling"].get("type") == "yarn":
self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
if self.hf_arch == "Qwen2Model":
name = f"model.{name}" # map to Qwen2ForCausalLM tensors
yield from super().modify_tensors(data_torch, name, bid)
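The hf_arch check above only prefixes tensor names so that bare Qwen2Model checkpoints (embedding-style repos without a causal LM head) reuse the existing Qwen2ForCausalLM mapping. A small illustration (the tensor name is a typical HF Qwen2 name, used purely as an example):

name = "layers.0.self_attn.q_proj.weight"  # as stored by a bare Qwen2Model checkpoint
hf_arch = "Qwen2Model"                     # what get_model_architecture() reports for it
if hf_arch == "Qwen2Model":
    name = f"model.{name}"                 # -> "model.layers.0.self_attn.q_proj.weight"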
@ModelBase.register("Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration") @ModelBase.register("Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
class Qwen2VLModel(TextModel): class Qwen2VLModel(TextModel):
@ -2583,6 +2621,82 @@ class Qwen2VLModel(TextModel):
return [(self.map_tensor_name(name), data_torch)] return [(self.map_tensor_name(name), data_torch)]
@ModelBase.register("Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
class Qwen2VLVisionModel(VisionModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.hparams["image_size"] = self.hparams.get("image_size", 560)
# rename config.json values
self.hparams["num_attention_heads"] = self.hparams.get("num_heads")
self.hparams["num_hidden_layers"] = self.hparams.get("depth")
if "embed_dim" in self.hparams: # qwen2vl
self.hparams["intermediate_size"] = self.hparams.get("hidden_size")
self.hparams["hidden_size"] = self.hparams.get("embed_dim")
def set_gguf_parameters(self):
super().set_gguf_parameters()
hparams = self.hparams
if self.global_config['model_type'] == 'qwen2_vl':
self.gguf_writer.add_vision_projector_type(gguf.VisionProjectorType.QWEN2VL)
elif self.global_config['model_type'] == 'qwen2_5_vl':
self.gguf_writer.add_vision_projector_type(gguf.VisionProjectorType.QWEN25VL)
self.gguf_writer.add_vision_use_silu(True)
# find n_wa_pattern (window attention pattern)
fullatt_block_indexes = hparams.get("fullatt_block_indexes")
assert fullatt_block_indexes is not None, "fullatt_block_indexes is required for qwen2_5_vl"
n_wa_pattern = fullatt_block_indexes[0] + 1
# validate n_wa_pattern
for i in range(1, len(fullatt_block_indexes)):
if fullatt_block_indexes[i] - fullatt_block_indexes[i - 1] != n_wa_pattern:
raise ValueError(f"Invalid fullatt_block_indexes: {fullatt_block_indexes}")
self.gguf_writer.add_vision_n_wa_pattern(n_wa_pattern)
else:
raise ValueError(f"Unknown QwenVL model type: {self.global_config['model_type']}")
# default values below are taken from HF transformers code
self.gguf_writer.add_vision_attention_layernorm_eps(self.global_config.get("rms_norm_eps", 1e-6))
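A worked example of the n_wa_pattern derivation above. The indexes are the layout commonly seen in Qwen2.5-VL configs and are only illustrative; the real values come from the checkpoint's config.json:

fullatt_block_indexes = [7, 15, 23, 31]       # blocks that use full attention
n_wa_pattern = fullatt_block_indexes[0] + 1   # 8
# the validation loop accepts this because consecutive indexes differ by exactly 8,
# i.e. one full-attention block per 8 blocks, the rest use window attention
assert all(b - a == n_wa_pattern for a, b in zip(fullatt_block_indexes, fullatt_block_indexes[1:]))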
def tensor_force_quant(self, name, new_name, bid, n_dims):
del bid, name, n_dims # unused
if ".patch_embd." in new_name:
return gguf.GGMLQuantizationType.F16
if ".position_embd." in new_name:
return gguf.GGMLQuantizationType.F32
return False
def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
del bid # unused
if name.startswith("visual."):
# process visual tensors
# split QKV tensors if needed
if ".qkv." in name:
if data_torch.ndim == 2: # weight
c3, _ = data_torch.shape
else: # bias
c3 = data_torch.shape[0]
assert c3 % 3 == 0
c = c3 // 3
wq = data_torch[:c]
wk = data_torch[c: c * 2]
wv = data_torch[c * 2:]
return [
(self.map_tensor_name(name.replace("qkv", "q")), wq),
(self.map_tensor_name(name.replace("qkv", "k")), wk),
(self.map_tensor_name(name.replace("qkv", "v")), wv),
]
elif 'patch_embed.proj.weight' in name:
# split Conv3D into Conv2Ds
c1, c2, kt, kh, kw = data_torch.shape
del c1, c2, kh, kw # unused
assert kt == 2, "Current implementation only supports temporal_patch_size of 2"
return [
(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight" , data_torch[:, :, 0, ...]),
(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight.1", data_torch[:, :, 1, ...]),
]
else:
return [(self.map_tensor_name(name), data_torch)]
return [] # skip other tensors
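A minimal sketch of the two splits performed above (shapes are illustrative; the real dimensions come from the checkpoint):

import torch

# fused attention weight: [3 * n_embd, n_embd] -> three [n_embd, n_embd] slices
qkv = torch.randn(3 * 1280, 1280)
c = qkv.shape[0] // 3
wq, wk, wv = qkv[:c], qkv[c:2 * c], qkv[2 * c:]

# Conv3D patch embedding: [out, in, kt=2, kh, kw] -> two Conv2D weights,
# stored under ".weight" and ".weight.1" as in modify_tensors() above
conv3d = torch.randn(1280, 3, 2, 14, 14)
patch0, patch1 = conv3d[:, :, 0, ...], conv3d[:, :, 1, ...]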
@ModelBase.register("WavTokenizerDec") @ModelBase.register("WavTokenizerDec")
class WavTokenizerDecModel(TextModel): class WavTokenizerDecModel(TextModel):
model_arch = gguf.MODEL_ARCH.WAVTOKENIZER_DEC model_arch = gguf.MODEL_ARCH.WAVTOKENIZER_DEC
@ -3318,29 +3432,7 @@ class BertModel(TextModel):
def set_gguf_parameters(self): def set_gguf_parameters(self):
super().set_gguf_parameters() super().set_gguf_parameters()
self.gguf_writer.add_causal_attention(False) self.gguf_writer.add_causal_attention(False)
self._try_set_pooling_type()
# get pooling path
pooling_path = None
module_path = self.dir_model / "modules.json"
if module_path.is_file():
with open(module_path, encoding="utf-8") as f:
modules = json.load(f)
for mod in modules:
if mod["type"] == "sentence_transformers.models.Pooling":
pooling_path = mod["path"]
break
# get pooling type
if pooling_path is not None:
with open(self.dir_model / pooling_path / "config.json", encoding="utf-8") as f:
pooling = json.load(f)
if pooling["pooling_mode_mean_tokens"]:
pooling_type = gguf.PoolingType.MEAN
elif pooling["pooling_mode_cls_token"]:
pooling_type = gguf.PoolingType.CLS
else:
raise NotImplementedError("Only MEAN and CLS pooling types supported")
self.gguf_writer.add_pooling_type(pooling_type)
def set_vocab(self):
tokens, toktypes, tokpre = self.get_vocab_base()
@@ -3549,8 +3641,13 @@ class NomicBertModel(BertModel):
if self._tokenizer_is_xlmroberta:
self._xlmroberta_tokenizer_init()
# the HF config claims n_ctx=8192, but it uses RoPE scaling
self.hparams["n_ctx"] = 2048
npos, mtp = self.hparams["n_positions"], self.hparams.get("max_trained_positions", 2048)
if npos == 8192 and mtp == 2048:
self.hparams["n_positions"] = 2048 # nomic-embed-text v1 and v1.5 are trained for 2048 tokens.
elif npos == 2048 and mtp == 2048:
self.hparams["n_positions"] = 512 # nomic-embed-text-v2-moe is trained for 512 tokens.
else:
raise ValueError(f"unrecognized parameters: n_positions={npos}, max_trained_positions={mtp}")
assert self.hparams["activation_function"] == "gelu" if self.is_moe else "swiglu"
@@ -5879,8 +5976,7 @@ def split_str_to_n_bytes(split_str: str) -> int:
return n
def get_model_architecture(dir_model: Path, model_type: ModelType, hparams: Any = None) -> str:
hparams = ModelBase.load_hparams(dir_model) if hparams is None else hparams
def get_model_architecture(hparams: dict[str, Any], model_type: ModelType) -> str:
text_config = hparams.get("text_config", {})
vision_config = hparams.get("vision_config", {})
arch = hparams["architectures"][0]
@@ -5951,7 +6047,8 @@ def main() -> None:
with torch.inference_mode():
output_type = ftype_map[args.outtype]
model_type = ModelType.VISION if args.mmproj else ModelType.TEXT
model_architecture = get_model_architecture(dir_model, model_type)
hparams = ModelBase.load_hparams(dir_model)
model_architecture = get_model_architecture(hparams, model_type)
logger.info(f"Model architecture: {model_architecture}")
try:
model_class = ModelBase.from_model_architecture(model_architecture, model_type=model_type)

View file

@@ -1,217 +0,0 @@
import argparse
from typing import Dict, List, Optional
import torch
import numpy as np
from gguf import *
from transformers import (
AutoProcessor,
Qwen2VLConfig,
Qwen2VLProcessor,
Qwen2VLForConditionalGeneration,
Qwen2_5_VLConfig, # type: ignore[reportAttributeAccessIssue]
Qwen2_5_VLForConditionalGeneration, # type: ignore[reportAttributeAccessIssue]
)
VISION = "clip.vision"
def k(raw_key: str, arch: str) -> str:
return raw_key.format(arch=arch)
def get_n_wa_pattern(fullatt_block_indexes: Optional[List[int]]):
if fullatt_block_indexes is None:
return 0
n_wa = fullatt_block_indexes[0]
for a, b in zip(fullatt_block_indexes, fullatt_block_indexes[1:]):
if b - a - 1 != n_wa:
raise ValueError(
f"window/full attention layer should have fix pattern of "
f"for each full-attention layer followed by {n_wa} window-attention layers"
)
return n_wa + 1
class VL2:
@staticmethod
def to_gguf_name(name: str) -> str:
og = name
name = name.replace("text_model", "t").replace("vision_model", "v")
name = name.replace("blocks", "blk").replace("embeddings.", "")
name = name.replace("attn.", "attn_")
name = name.replace("mlp.fc1", "ffn_down").replace("mlp.fc2", "ffn_up").replace("proj.", "out.")
# name = name.replace("layrnorm", "ln").replace("layer_norm", "ln").replace("layernorm", "ln")
name = name.replace("norm1", "ln1").replace("norm2", "ln2")
name = name.replace("merger.mlp", 'mm')
print(f"[to_gguf_name] {og} --> {name}")
return name
@classmethod
def find_vision_tensors(cls, qwen2vl, dtype) -> Dict[str, np.ndarray]:
vision_model = qwen2vl.visual
tensor_map = {}
for name, ten in vision_model.state_dict().items():
ten = ten.numpy()
if 'qkv' in name:
if ten.ndim == 2: # weight
c3, _ = ten.shape
else: # bias
c3 = ten.shape[0]
assert c3 % 3 == 0
c = c3 // 3
wq = ten[:c]
wk = ten[c: c * 2]
wv = ten[c * 2:]
tensor_map[cls.to_gguf_name(f"vision_model.{name}").replace("qkv", "q")] = wq
tensor_map[cls.to_gguf_name(f"vision_model.{name}").replace("qkv", "k")] = wk
tensor_map[cls.to_gguf_name(f"vision_model.{name}").replace("qkv", "v")] = wv
elif 'merger' in name:
if name.endswith("ln_q.weight"):
tensor_map['v.post_ln.weight'] = ten
elif name.endswith("ln_q.bias"):
tensor_map['v.post_ln.bias'] = ten
else:
# "merger.mlp.%d.weight/bias" --> "mm.%d.weight/bias"
tensor_map[cls.to_gguf_name(name)] = ten
elif 'patch_embed.proj.weight' in name:
# NOTE: split Conv3D into Conv2Ds
c1, c2, kt, kh, kw = ten.shape
assert kt == 2, "Current implmentation only support temporal_patch_size of 2"
tensor_map["v.patch_embd.weight"] = ten[:, :, 0, ...]
tensor_map["v.patch_embd.weight.1"] = ten[:, :, 1, ...]
else:
tensor_map[cls.to_gguf_name(f"vision_model.{name}")] = ten
for new_name, ten in tensor_map.items():
if ten.ndim <= 1 or new_name.endswith("_norm.weight"):
tensor_map[new_name] = ten.astype(np.float32)
else:
tensor_map[new_name] = ten.astype(dtype)
tensor_map["v.position_embd.weight"] = np.zeros([10, 10], dtype=np.float32) # dummy tensor, just here as a placeholder
return tensor_map
class VL25(VL2):
@staticmethod
def to_gguf_name(name: str) -> str:
og = name
name = name.replace("text_model", "t").replace("vision_model", "v")
name = name.replace("blocks", "blk").replace("embeddings.", "")
name = name.replace("attn.", "attn_")
name = name.replace("mlp.down_proj", "ffn_down").replace("mlp.up_proj", "ffn_up")
name = name.replace("mlp.gate_proj", "ffn_gate").replace("proj.", "out.")
name = name.replace("norm1", "ln1").replace("norm2", "ln2")
name = name.replace("merger.mlp", 'mm')
print(f"[vl25][to_gguf_name] {og} --> {name}")
return name
def main(args):
if args.data_type == 'fp32':
dtype = torch.float32
np_dtype = np.float32
ftype = 0
elif args.data_type == 'fp16':
dtype = torch.float16
np_dtype = np.float16
ftype = 1
else:
raise ValueError()
local_model = False
model_path = ""
model_name = args.model_name
print("model_name: ", model_name)
if args.model_type == "qwen2vl":
qwen2vl = Qwen2VLForConditionalGeneration.from_pretrained(
model_name, torch_dtype=dtype, device_map="cpu"
)
cfg: Qwen2VLConfig = qwen2vl.config # type: ignore[reportAssignmentType]
vcfg = cfg.vision_config
else:
qwen2vl = Qwen2_5_VLForConditionalGeneration.from_pretrained(
model_name, torch_dtype=dtype, device_map="cpu"
)
cfg: Qwen2_5_VLConfig = qwen2vl.config # type: ignore[reportAssignmentType]
vcfg = cfg.vision_config
if os.path.isdir(model_name):
local_model = True
if model_name.endswith(os.sep):
model_name = model_name[:-1]
model_path = model_name
model_name = os.path.basename(model_name)
fname_out = f"{model_name.replace('/', '-').lower()}-vision.gguf"
fout = GGUFWriter(path=fname_out, arch="clip")
fout.add_description("image encoder for Qwen2VL")
fout.add_file_type(ftype)
fout.add_bool("clip.has_text_encoder", False)
fout.add_bool("clip.has_vision_encoder", True)
fout.add_bool("clip.has_qwen2vl_merger", True)
print(cfg.vision_config)
if 'silu' in cfg.vision_config.hidden_act.lower():
fout.add_bool("clip.use_silu", True)
fout.add_bool("clip.use_gelu", False)
elif 'gelu' in cfg.vision_config.hidden_act.lower():
fout.add_bool("clip.use_silu", False)
fout.add_bool("clip.use_gelu", 'quick' not in cfg.vision_config.hidden_act.lower())
else:
raise ValueError()
if args.model_type == "qwen2.5vl":
fout.add_uint32("clip.vision.n_wa_pattern", get_n_wa_pattern(vcfg.fullatt_block_indexes))
fout.add_uint32(k(KEY_EMBEDDING_LENGTH, VISION), vcfg.hidden_size)
fout.add_uint32("clip.vision.projection_dim", vcfg.out_hidden_size)
fout.add_string("clip.projector_type", "qwen2.5vl_merger")
else:
fout.add_string("clip.projector_type", "qwen2vl_merger")
fout.add_uint32(k(KEY_EMBEDDING_LENGTH, VISION), vcfg.embed_dim)
fout.add_uint32("clip.vision.projection_dim", vcfg.hidden_size)
if args.model_type == "qwen2.5vl":
tensor_map = VL25.find_vision_tensors(qwen2vl, np_dtype)
else:
tensor_map = VL2.find_vision_tensors(qwen2vl, np_dtype)
for name, data in tensor_map.items():
fout.add_tensor(name, data)
fout.add_uint32("clip.vision.patch_size", vcfg.patch_size)
fout.add_uint32("clip.vision.image_size", 14 * 40) # some reasonable size that is divable by (14*2)
fout.add_uint32(k(KEY_ATTENTION_HEAD_COUNT, VISION), vcfg.num_heads)
fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, VISION), 1e-6)
fout.add_uint32(k(KEY_BLOCK_COUNT, VISION), vcfg.depth)
fout.add_uint32(k(KEY_FEED_FORWARD_LENGTH, VISION), 0) # not sure what this does, put 0 here as a placeholder
fout.add_name(model_name)
"""
HACK: Since vision rope related parameter aren't stored in the `Qwen2VLConfig,
it will be hardcoded in the `clip_image_build_graph` from `clip.cpp`.
"""
if local_model:
processor: Qwen2VLProcessor = AutoProcessor.from_pretrained(model_path)
else:
processor: Qwen2VLProcessor = AutoProcessor.from_pretrained(model_name)
fout.add_array("clip.vision.image_mean", processor.image_processor.image_mean) # type: ignore[reportAttributeAccessIssue]
fout.add_array("clip.vision.image_std", processor.image_processor.image_std) # type: ignore[reportAttributeAccessIssue]
fout.write_header_to_file()
fout.write_kv_data_to_file()
fout.write_tensors_to_file()
fout.close()
print("save model as: ", fname_out)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("model_name", nargs='?', default="Qwen/Qwen2-VL-2B-Instruct")
parser.add_argument("--model_type", nargs='?', choices=['qwen2vl', 'qwen2.5vl'], default="qwen2vl")
parser.add_argument("--data_type", nargs='?', choices=['fp32', 'fp16'], default="fp32")
args = parser.parse_args()
main(args)

View file

@@ -36,12 +36,6 @@ add_test() {
arr_tmpl+=("$tmpl")
}
add_test_big() {
if [ "$RUN_BIG_TESTS" = true ]; then
add_test "$@"
fi
}
add_test "llama-mtmd-cli" "ggml-org/SmolVLM-500M-Instruct-GGUF:Q8_0" add_test "llama-mtmd-cli" "ggml-org/SmolVLM-500M-Instruct-GGUF:Q8_0"
add_test "llama-mtmd-cli" "ggml-org/SmolVLM2-2.2B-Instruct-GGUF:Q4_K_M" add_test "llama-mtmd-cli" "ggml-org/SmolVLM2-2.2B-Instruct-GGUF:Q4_K_M"
add_test "llama-mtmd-cli" "ggml-org/SmolVLM2-500M-Video-Instruct-GGUF:Q8_0" add_test "llama-mtmd-cli" "ggml-org/SmolVLM2-500M-Video-Instruct-GGUF:Q8_0"
@ -58,8 +52,16 @@ add_test "llama-mtmd-cli" "bartowski/Qwen2-VL-2B-Instruct-GGUF:Q4_K_M"
add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-3B-Instruct-GGUF:Q4_K_M" add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-3B-Instruct-GGUF:Q4_K_M"
# to test the big models, run: ./tests.sh big # to test the big models, run: ./tests.sh big
add_test_big "llama-mtmd-cli" "ggml-org/pixtral-12b-GGUF:Q4_K_M" if [ "$RUN_BIG_TESTS" = true ]; then
add_test_big "llama-mtmd-cli" "ggml-org/Mistral-Small-3.1-24B-Instruct-2503-GGUF" "mistral-v7" add_test "llama-mtmd-cli" "ggml-org/pixtral-12b-GGUF:Q4_K_M"
add_test "llama-mtmd-cli" "ggml-org/Mistral-Small-3.1-24B-Instruct-2503-GGUF" "mistral-v7"
add_test "llama-mtmd-cli" "ggml-org/Qwen2-VL-2B-Instruct-GGUF:Q4_K_M"
add_test "llama-mtmd-cli" "ggml-org/Qwen2-VL-7B-Instruct-GGUF:Q4_K_M"
add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-3B-Instruct-GGUF:Q4_K_M"
add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-7B-Instruct-GGUF:Q4_K_M"
# add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-32B-Instruct-GGUF:Q4_K_M" # does not work on my mac M3 Ultra
# add_test "llama-mtmd-cli" "ggml-org/Qwen2.5-VL-72B-Instruct-GGUF:Q4_K_M" # too big
fi
# these models always give the wrong answer, not sure why # these models always give the wrong answer, not sure why
# add_test "llama-mtmd-cli" "ggml-org/SmolVLM-Instruct-GGUF:Q4_K_M" # add_test "llama-mtmd-cli" "ggml-org/SmolVLM-Instruct-GGUF:Q4_K_M"

View file

@@ -1054,6 +1054,493 @@ class tinyBLAS_Q0_AVX {
} \
} \
template <typename TA, typename TB, typename TC>
class tinyBLAS_BF16_PPC {
public:
tinyBLAS_BF16_PPC(int64_t k,
const TA *A, int64_t lda,
const TB *B, int64_t ldb,
TC *C, int64_t ldc,
int ith, int nth)
: A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
}
void matmul(int64_t m, int64_t n) {
mnpack(0, m, 0, n);
}
private:
void vector_permute_store(vec_t *c, int numVec, unsigned char *vecOffset) {
vec_t t[8], s[8];
vec_t swiz1 = {0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23};
vec_t swiz2 = {8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31};
vec_t swiz3 = {0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23};
vec_t swiz4 = {8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31};
if (numVec == 2) {
t[0] = vec_perm(c[0], c[1], swiz1);
t[1] = vec_perm(c[2], c[3], swiz1);
s[0] = vec_perm(t[0], t[1], swiz3);
s[1] = vec_perm(t[0], t[1], swiz4);
vec_xst(s[0], 0, (vec_t*)vecOffset);
vec_xst(s[1], 0, (vec_t*)(vecOffset + 16));
} else if (numVec == 4) {
t[0] = vec_perm(c[0], c[1], swiz1);
t[1] = vec_perm(c[0], c[1], swiz2);
t[2] = vec_perm(c[2], c[3], swiz1);
t[3] = vec_perm(c[2], c[3], swiz2);
s[0] = vec_perm(t[0], t[2], swiz3);
s[1] = vec_perm(t[0], t[2], swiz4);
s[2] = vec_perm(t[1], t[3], swiz3);
s[3] = vec_perm(t[1], t[3], swiz4);
for (int i = 0; i < 4; ++i)
vec_xst(s[i], 0, (vec_t*)(vecOffset + i * 16));
} else if (numVec == 8) {
for (int i = 0; i < 4; i += 2) {
t[i+0] = vec_perm(c[i+0], c[i+1], swiz1);
t[i+1] = vec_perm(c[i+0], c[i+1], swiz2);
}
for (int i = 4; i < 8; i += 2) {
t[i+0] = vec_perm(c[i+0], c[i+1], swiz1);
t[i+1] = vec_perm(c[i+0], c[i+1], swiz2);
}
s[0] = vec_perm(t[0], t[2], swiz3);
s[1] = vec_perm(t[0], t[2], swiz4);
s[2] = vec_perm(t[1], t[3], swiz3);
s[3] = vec_perm(t[1], t[3], swiz4);
s[4] = vec_perm(t[4], t[6], swiz3);
s[5] = vec_perm(t[4], t[6], swiz4);
s[6] = vec_perm(t[5], t[7], swiz3);
s[7] = vec_perm(t[5], t[7], swiz4);
for (int i = 0; i < 8; ++i)
vec_xst(s[i], 0, (vec_t*)(vecOffset + i * 16));
}
}
void packNormal(const TA* a, int64_t lda, int rows, int cols, unsigned char* vec) {
int64_t i, j;
TA *aoffset = NULL;
unsigned char *vecOffset = NULL;
TA * aoffsets[8];
vector unsigned char c_arr[8];
aoffset = const_cast<TA*>(a);
vecOffset = vec;
j = (rows >> 3);
if (j > 0) {
do {
if (cols == 4) {
aoffsets[0] = aoffset;
for (int it = 1; it < 4; ++it)
aoffsets[it] = aoffsets[it-1] + lda;
aoffset += 4 * lda;
for (int i = 0; i < 4; ++i)
c_arr[i] = vec_xl(0, (vector unsigned char*)aoffsets[i]);
vector_permute_store(c_arr, 4, vecOffset);
for (int i = 0; i<4; i++)
aoffsets[i] = aoffsets[i]+lda;
vecOffset +=64;
}
i = (cols >> 3);
if (i > 0) {
aoffsets[0] = aoffset;
for (int it = 1; it < 8; ++it) {
aoffsets[it] = aoffsets[it-1] + lda;
}
aoffset += 8 * lda;
do {
for (int it = 0; it < 8; ++it)
c_arr[it] = vec_xl(0, (vector unsigned char*)aoffsets[it]);
vector_permute_store(c_arr, 8, vecOffset);
for (int it = 0; it < 8; ++it)
aoffsets[it] = aoffsets[it] + 8*lda;
vecOffset += 128;
i--;
} while(i > 0);
}
j--;
} while(j > 0);
}
if (rows & 4) {
aoffsets[0] = aoffset;
for (int it = 1; it < 4; ++it)
aoffsets[it] = aoffsets[it-1] + lda;
aoffset += 4 * lda;
if (cols == 4) {
for (int it = 0; it < 4; ++it)
c_arr[it] = vec_xl(0, (vector unsigned char*)aoffsets[it]);
vector_permute_store(c_arr, 2, vecOffset);
for (int it = 0; it< 4; it++)
aoffsets[it] = aoffsets[it] + lda;
vecOffset += 32;
}
i = (cols >> 3);
if (i > 0) {
do {
for (int it = 0; it < 4; ++it)
c_arr[it] = vec_xl(0, (vector unsigned char*)aoffsets[it]);
vector_permute_store(c_arr, 4, vecOffset);
for (int it = 0; it< 4; it++)
aoffsets[it] = aoffsets[it] + 8*lda;
vecOffset += 64;
i--;
} while(i > 0);
}
}
if (rows & 3) {
aoffsets[0] = aoffset;
for (int it = 1; it < 4; ++it)
aoffsets[it] = aoffsets[it-1] + lda;
if (cols == 4) {
switch(rows) {
case 3: c_arr[2] = vec_xl(0, (vector unsigned char*)aoffsets[2]);
case 2: c_arr[1] = vec_xl(0, (vector unsigned char*)aoffsets[1]);
case 1: c_arr[0] = vec_xl(0, (vector unsigned char*)aoffsets[0]);
break;
}
vector_permute_store(c_arr, 2, vecOffset);
for (int it = 0; it< 4; it++)
aoffsets[it] = aoffsets[it] + lda;
vecOffset += 32;
}
i = (cols >> 3);
if (i > 0) {
do {
switch(rows) {
case 3: c_arr[2] = vec_xl(0, (vector unsigned char*)aoffsets[2]);
case 2: c_arr[1] = vec_xl(0, (vector unsigned char*)aoffsets[1]);
case 1: c_arr[0] = vec_xl(0, (vector unsigned char*)aoffsets[0]);
break;
}
vector_permute_store(c_arr, 4, vecOffset);
for (int it = 0; it <4; it++)
aoffsets[it] = aoffsets[it] + 8* lda;
vecOffset += 64;
i--;
} while(i > 0);
}
}
}
void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
int64_t mc, nc, mp, np;
int m_rem = MIN(m - m0, 8);
int n_rem = MIN(n - n0, 8);
if (m_rem >= 8 && n_rem >= 8) {
mc = 8;
nc = 8;
gemm<8,8>(m0, m, n0, n);
} else if (m_rem >= 4 && n_rem >= 8) {
mc = 4;
nc = 8;
gemm<4,8>(m0, m, n0, n);
} else if (m_rem >=8 && n_rem >=4){
mc = 8;
nc = 4;
gemm<8,4>(m0, m, n0, n);
} else if ((m_rem < 4) && (n_rem >= 8)) {
nc = 8;
switch(m_rem) {
case 1:
mc = 1;
gemm_Mx8<1>(m0, m, n0, n);
break;
case 2:
mc = 2;
gemm_Mx8<2>(m0, m, n0, n);
break;
case 3:
mc = 3;
gemm_Mx8<3>(m0, m, n0, n);
break;
default:
return;
}
} else if (m_rem >= 4 && n_rem >= 4) {
mc = 4;
nc = 4;
gemm_small<4, 4>(m0, m, n0, n);
} else if ((m_rem > 4) && (n_rem < 4)) {
mc = 4;
switch(n_rem) {
case 1:
nc = 1;
gemm_small<4, 1>(m0, m, n0, n);
break;
case 2:
nc = 2;
gemm_small<4, 2>(m0, m, n0, n);
break;
case 3:
nc = 3;
gemm_small<4, 3>(m0, m, n0, n);
break;
default:
return;
}
} else {
switch((m_rem << 4) | n_rem) {
case 0x43:
mc = 4;
nc = 3;
gemm_small<4, 3>(m0, m, n0, n);
break;
case 0x42:
mc = 4;
nc = 2;
gemm_small<4, 2>(m0, m, n0, n);
break;
case 0x41:
mc = 4;
nc = 1;
gemm_small<4, 1>(m0, m, n0, n);
break;
case 0x34:
mc = 3;
nc = 4;
gemm_small<3, 4>(m0, m, n0, n);
break;
case 0x33:
mc = 3;
nc = 3;
gemm_small<3, 3>(m0, m, n0, n);
break;
case 0x32:
mc = 3;
nc = 2;
gemm_small<3, 2>(m0, m, n0, n);
break;
case 0x31:
mc = 3;
nc = 1;
gemm_small<3, 1>(m0, m, n0, n);
break;
case 0x24:
mc = 2;
nc = 4;
gemm_small<2,4>(m0, m, n0, n);
break;
case 0x23:
mc = 2;
nc = 3;
gemm_small<2, 3>(m0, m, n0, n);
break;
case 0x22:
mc = 2;
nc = 2;
gemm_small<2, 2>(m0, m, n0, n);
break;
case 0x21:
mc = 2;
nc = 1;
gemm_small<2, 1>(m0, m, n0, n);
break;
case 0x14:
mc = 1;
nc = 4;
gemm_small<1, 4>(m0, m, n0, n);
break;
case 0x13:
mc = 1;
nc = 3;
gemm_small<1, 3>(m0, m, n0, n);
break;
case 0x12:
mc = 1;
nc = 2;
gemm_small<1, 2>(m0, m, n0, n);
break;
case 0x11:
mc = 1;
nc = 1;
gemm_small<1, 1>(m0, m, n0, n);
break;
default:
return;
}
}
mp = m0 + (m - m0) / mc * mc;
np = n0 + (n - n0) / nc * nc;
mnpack(mp, m, n0, np);
mnpack(m0, m, np, n);
}
void KERNEL_4x8(int64_t ii, int64_t jj) {
vec_t vec_A[4], vec_B[8] , vec_C[4];
acc_t acc_0, acc_1;
__builtin_mma_xxsetaccz(&acc_0);
__builtin_mma_xxsetaccz(&acc_1);
for (int l = 0; l < k; l+=8) {
packNormal((A+(ii*lda)+l), lda, 4, 8, (uint8_t*)vec_A);
packNormal((B+(jj*ldb)+l), ldb, 8, 8, (uint8_t*)vec_B);
for (int x = 0; x < 4; x++) {
__builtin_mma_xvbf16ger2pp(&acc_0, vec_A[x], vec_B[x]);
__builtin_mma_xvbf16ger2pp(&acc_1, vec_A[x], vec_B[x+4]);
}
}
SAVE_ACC(&acc_0, ii, jj);
SAVE_ACC(&acc_1, ii, jj+4);
}
void KERNEL_8x4(int64_t ii, int64_t jj) {
vec_t vec_A[8], vec_B[4] , vec_C[4];
acc_t acc_0, acc_1;
__builtin_mma_xxsetaccz(&acc_0);
__builtin_mma_xxsetaccz(&acc_1);
for (int l = 0; l < k; l+=8) {
packNormal((A+(ii*lda)+l), lda, 8, 8, (uint8_t*)vec_A);
packNormal((B+(jj*ldb)+l), ldb, 8, 4, (uint8_t*)vec_B);
for (int x = 0; x < 4; x++) {
__builtin_mma_xvbf16ger2pp(&acc_0, vec_A[x], vec_B[x]);
__builtin_mma_xvbf16ger2pp(&acc_1, vec_A[x+4], vec_B[x]);
}
}
SAVE_ACC(&acc_0, ii, jj);
SAVE_ACC(&acc_1, ii+4, jj);
}
void KERNEL_8x8(int64_t ii, int64_t jj) {
vec_t vec_A[8], vec_B[8], vec_C[4];
acc_t acc_0, acc_1, acc_2, acc_3;
__builtin_mma_xxsetaccz(&acc_0);
__builtin_mma_xxsetaccz(&acc_1);
__builtin_mma_xxsetaccz(&acc_2);
__builtin_mma_xxsetaccz(&acc_3);
for (int l = 0; l < k; l+=8) {
packNormal(A+(ii*lda)+l, lda, 8, 8, (uint8_t*)vec_A);
packNormal(B+(jj*ldb)+l, ldb, 8, 8, (uint8_t*)vec_B);
for (int x = 0; x < 4; x++) {
__builtin_mma_xvbf16ger2pp(&acc_0, vec_A[x], vec_B[x]);
__builtin_mma_xvbf16ger2pp(&acc_1, (vec_t)vec_A[x], (vec_t)vec_B[x+4]);
__builtin_mma_xvbf16ger2pp(&acc_2, (vec_t)vec_A[x+4], (vec_t)vec_B[x]);
__builtin_mma_xvbf16ger2pp(&acc_3, (vec_t)vec_A[x+4], (vec_t)vec_B[x+4]);
}
}
SAVE_ACC(&acc_0, ii, jj);
SAVE_ACC(&acc_1, ii, jj+4);
SAVE_ACC(&acc_2, ii+4, jj);
SAVE_ACC(&acc_3, ii+4, jj+4);
}
template<int RM, int RN>
void gemm_small(int64_t m0, int64_t m, int64_t n0, int64_t n) {
int64_t ytiles = (m - m0) / RM;
int64_t xtiles = (n - n0) / RN;
int64_t tiles = xtiles * ytiles;
int64_t duty = (tiles + nth - 1) / nth;
int64_t start = duty * ith;
int64_t end = start + duty;
if (end > tiles)
end = tiles;
for (int64_t job = start; job < end; ++job) {
int64_t ii = m0 + job / xtiles * RM;
int64_t jj = n0 + job % xtiles * RN;
vec_t vec_C[4];
acc_t acc_0;
__builtin_mma_xxsetaccz(&acc_0);
vec_t vec_A[2], vec_B[2];
for (int l=0; l<k; l+=4) {
packNormal(A+(ii*lda)+l, lda, RM, 4, (uint8_t*)vec_A);
packNormal(B+(jj*ldb)+l, ldb, RN, 4, (uint8_t*)vec_B);
for (int x = 0; x<2; x++) {
__builtin_mma_xvbf16ger2pp(&acc_0, vec_A[x], vec_B[x]);
}
}
__builtin_mma_disassemble_acc(vec_C, &acc_0);
for (int I = 0; I < RM; I++) {
for (int J = 0; J < RN; J++) {
*((TC*)(C+ii+((jj+J)*ldc)+I)) = *((TC*)&vec_C[I]+J);
}
}
}
}
template<int RM>
void gemm_Mx8(int64_t m0, int64_t m, int64_t n0, int64_t n) {
int RN = 8;
int64_t ytiles = (m - m0) / RM;
int64_t xtiles = (n - n0) / RN;
int64_t tiles = xtiles * ytiles;
int64_t duty = (tiles + nth - 1) / nth;
int64_t start = duty * ith;
int64_t end = start + duty;
if (end > tiles)
end = tiles;
for (int64_t job = start; job < end; ++job) {
int64_t ii = m0 + job / xtiles * RM;
int64_t jj = n0 + job % xtiles * RN;
vec_t vec_C[4];
acc_t acc_0, acc_1;
__builtin_mma_xxsetaccz(&acc_0);
__builtin_mma_xxsetaccz(&acc_1);
vec_t vec_A[4], vec_B[8];
for (int l=0; l<k; l+=8) {
packNormal(A+(ii*lda)+l, lda, RM, 8, (uint8_t*)vec_A);
packNormal(B+(jj*ldb)+l, ldb, RN, 8, (uint8_t*)vec_B);
for (int x = 0; x<4; x++) {
__builtin_mma_xvbf16ger2pp(&acc_0, vec_A[x], vec_B[x]);
__builtin_mma_xvbf16ger2pp(&acc_1, vec_A[x], vec_B[x+4]);
}
}
__builtin_mma_disassemble_acc(vec_C, &acc_0);
for (int I = 0; I < RM; I++) {
for (int J = 0; J < 4; J++) {
*((TC*)(C+ii+((jj+J)*ldc)+I)) = *((TC*)&vec_C[I]+J);
}
}
__builtin_mma_disassemble_acc(vec_C, &acc_1);
for (int I = 0; I < RM; I++) {
for (int J = 0; J < 4; J++) {
*((TC*)(C+ii+((jj+4+J)*ldc)+I)) = *((TC*)&vec_C[I]+J);
}
}
}
}
template<int RM, int RN>
inline void kernel(int64_t ii, int64_t jj) {
if constexpr(RM == 4 && RN == 8) {
KERNEL_4x8(ii,jj);
} else if constexpr(RM == 8 && RN == 8) {
KERNEL_8x8(ii,jj);
} else if constexpr(RM == 8 && RN == 4) {
KERNEL_8x4(ii,jj);
} else {
static_assert(false, "RN/RM values not supported");
}
}
template <int RM, int RN>
NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
int64_t ytiles = (m - m0) / RM;
int64_t xtiles = (n - n0) / RN;
int64_t tiles = xtiles * ytiles;
int64_t duty = (tiles + nth - 1) / nth;
int64_t start = duty * ith;
int64_t end = start + duty;
if (end > tiles)
end = tiles;
for (int64_t job = start; job < end; ++job) {
int64_t ii = m0 + job / xtiles * RM;
int64_t jj = n0 + job % xtiles * RN;
kernel<RM, RN>(ii, jj);
}
}
const TA *const A;
const TB *const B;
TC *C;
const int64_t k;
const int64_t lda;
const int64_t ldb;
const int64_t ldc;
const int ith;
const int nth;
};
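The new tinyBLAS_BF16_PPC class uses the same tile-to-thread partitioning as the other PPC kernels in this file (gemm, gemm_small, gemm_Mx8). A Python sketch of that partitioning, with names mirroring the C++ above; it is illustrative only:

def thread_tiles(m, n, RM, RN, ith, nth, m0=0, n0=0):
    ytiles = (m - m0) // RM
    xtiles = (n - n0) // RN
    tiles = xtiles * ytiles
    duty = (tiles + nth - 1) // nth      # ceil-divide tiles across nth threads
    start = duty * ith
    end = min(start + duty, tiles)
    for job in range(start, end):
        ii = m0 + job // xtiles * RM     # top row of this RM x RN tile
        jj = n0 + job % xtiles * RN      # left column of this tile
        yield ii, jj

# e.g. thread 0 of 4 on a 64x64 output with 8x8 tiles handles the first 16 tiles
print(len(list(thread_tiles(64, 64, 8, 8, ith=0, nth=4))))   # 16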
template <typename TA, typename TB, typename TC>
class tinyBLAS_Q0_PPC {
public:
@@ -2202,6 +2689,7 @@ class tinyBLAS_PPC {
boffset = vec;
j = (rows >> 3);
if (j > 0) {
do {
aoffset1 = aoffset;
aoffset2 = aoffset1 + lda;
@@ -2875,9 +3363,22 @@ bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t m, int64
(float *)C, ldc};
return tb.matmul(m, n);
}
#elif defined(__MMA__)
if ((k % 8))
return false;
if(Btype == GGML_TYPE_BF16) {
tinyBLAS_BF16_PPC<ggml_bf16_t, ggml_bf16_t, float> tb{ k,
(const ggml_bf16_t *)A, lda,
(const ggml_bf16_t *)B, ldb,
(float *)C, ldc,
params->ith, params->nth};
tb.matmul(m, n);
return true;
}
#endif
return false;
}
case GGML_TYPE_F16: {
#if defined(__AVX512F__)
if (Btype == GGML_TYPE_F16) {

View file

@@ -405,6 +405,8 @@ struct vk_device_struct {
vk_pipeline pipeline_rwkv_wkv6_f32;
vk_pipeline pipeline_rwkv_wkv7_f32;
vk_pipeline pipeline_opt_step_adamw_f32;
vk_pipeline pipeline_conv2d_dw_whcn_f32;
vk_pipeline pipeline_conv2d_dw_cwhn_f32;
// [2][2][2] is for {f16acc,f32acc}x{large,small_rows}x{unaligned, aligned}
vk_pipeline pipeline_flash_attn_f32_f16_D64[GGML_TYPE_COUNT][2][2][2];
@@ -717,6 +719,24 @@ struct vk_op_rwkv_wkv7_push_constants {
uint32_t H;
};
struct vk_op_conv2d_dw_push_constants {
uint32_t ne;
uint32_t batches;
uint32_t channels;
uint32_t dst_w;
uint32_t dst_h;
uint32_t src_w;
uint32_t src_h;
uint32_t knl_w;
uint32_t knl_h;
int32_t stride_x;
int32_t stride_y;
int32_t pad_x;
int32_t pad_y;
int32_t dilation_x;
int32_t dilation_y;
};
struct vk_op_upscale_push_constants {
uint32_t ne; uint32_t a_offset; uint32_t d_offset;
uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03;
@@ -2626,6 +2646,9 @@ static void ggml_vk_load_shaders(vk_device& device) {
ggml_vk_create_pipeline(device, device->pipeline_opt_step_adamw_f32, "opt_step_adamw_f32", opt_step_adamw_f32_len, opt_step_adamw_f32_data, "main", 5, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
ggml_vk_create_pipeline(device, device->pipeline_conv2d_dw_whcn_f32, "conv2d_dw_whcn_f32", conv2d_dw_whcn_f32_len, conv2d_dw_whcn_f32_data, "main", 3, sizeof(vk_op_conv2d_dw_push_constants), {512, 1, 1}, {}, 1);
ggml_vk_create_pipeline(device, device->pipeline_conv2d_dw_cwhn_f32, "conv2d_dw_cwhn_f32", conv2d_dw_cwhn_f32_len, conv2d_dw_cwhn_f32_data, "main", 3, sizeof(vk_op_conv2d_dw_push_constants), {512, 1, 1}, {}, 1);
for (auto &c : compiles) {
c.wait();
}
@@ -6161,6 +6184,15 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
return ctx->device->pipeline_leaky_relu_f32;
}
return nullptr;
case GGML_OP_CONV_2D_DW:
if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
if (ggml_is_contiguous(src1)) {
return ctx->device->pipeline_conv2d_dw_whcn_f32;
} else if (ggml_is_contiguous_channels(src1)) {
return ctx->device->pipeline_conv2d_dw_cwhn_f32;
}
}
return nullptr;
default:
return nullptr;
}
@@ -6187,6 +6219,7 @@ static bool ggml_vk_op_supports_incontiguous(ggml_op op) {
case GGML_OP_REPEAT_BACK:
case GGML_OP_ROPE:
case GGML_OP_RMS_NORM:
case GGML_OP_CONV_2D_DW:
return true;
default:
return false;
@@ -6483,6 +6516,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
case GGML_OP_CONCAT:
case GGML_OP_UPSCALE:
case GGML_OP_UNARY:
case GGML_OP_CONV_2D_DW:
{
const uint32_t ne = ggml_nelements(dst);
if (ne > 262144) {
@@ -7269,6 +7303,30 @@ static void ggml_vk_pool_2d(ggml_backend_vk_context * ctx, vk_context& subctx, c
}, dryrun);
}
static void ggml_vk_conv_2d_dw(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
vk_op_conv2d_dw_push_constants p{};
p.ne = ggml_nelements(dst);
p.channels = dst->ne[2];
p.batches = dst->ne[3];
p.dst_w = dst->ne[0];
p.dst_h = dst->ne[1];
p.src_w = src1->ne[0];
p.src_h = src1->ne[1];
p.knl_w = src0->ne[0];
p.knl_h = src0->ne[1];
p.stride_x = dst->op_params[0];
p.stride_y = dst->op_params[1];
p.pad_x = dst->op_params[2];
p.pad_y = dst->op_params[3];
p.dilation_x = dst->op_params[4];
p.dilation_y = dst->op_params[5];
GGML_ASSERT(src0->ne[3] == p.channels);
GGML_ASSERT(src1->ne[3] == p.batches);
ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_CONV_2D_DW, std::move(p), dryrun);
}
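For context, the stride/pad/dilation values read from dst->op_params above determine the dst_w/dst_h carried in the push constants via the standard convolution output-size formula. This is general background rather than code from the diff:

def conv_out(src, knl, stride, pad, dilation):
    return (src + 2 * pad - dilation * (knl - 1) - 1) // stride + 1

# e.g. a 64x64 input with a 3x3 depthwise kernel, stride 1, pad 1, dilation 1 stays 64x64
assert conv_out(64, 3, 1, 1, 1) == 64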
static void ggml_vk_leaky_relu(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) {
const float * op_params = (const float *)dst->op_params;
ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_LEAKY_RELU, { (uint32_t)ggml_nelements(src0), 0, op_params[0], 0.0f }, dryrun);
@@ -8289,6 +8347,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod
case GGML_OP_IM2COL:
case GGML_OP_TIMESTEP_EMBEDDING:
case GGML_OP_POOL_2D:
case GGML_OP_CONV_2D_DW:
case GGML_OP_RWKV_WKV6:
case GGML_OP_RWKV_WKV7:
case GGML_OP_LEAKY_RELU:
@@ -8352,6 +8411,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod
case GGML_OP_IM2COL:
case GGML_OP_TIMESTEP_EMBEDDING:
case GGML_OP_POOL_2D:
case GGML_OP_CONV_2D_DW:
case GGML_OP_LEAKY_RELU:
{
// These operations all go through ggml_vk_op_f32, so short-circuit and
@@ -8525,6 +8585,10 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod
case GGML_OP_POOL_2D:
ggml_vk_pool_2d(ctx, compute_ctx, src0, node, dryrun);
break;
case GGML_OP_CONV_2D_DW:
ggml_vk_conv_2d_dw(ctx, compute_ctx, src0, src1, node, dryrun);
break;
case GGML_OP_LEAKY_RELU:
ggml_vk_leaky_relu(ctx, compute_ctx, src0, node, dryrun);
@@ -8646,6 +8710,7 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_tensor *
case GGML_OP_IM2COL:
case GGML_OP_TIMESTEP_EMBEDDING:
case GGML_OP_POOL_2D:
case GGML_OP_CONV_2D_DW:
case GGML_OP_RWKV_WKV6:
case GGML_OP_RWKV_WKV7:
case GGML_OP_LEAKY_RELU:
@@ -9623,6 +9688,7 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
case GGML_OP_COUNT_EQUAL:
case GGML_OP_IM2COL:
case GGML_OP_TIMESTEP_EMBEDDING:
case GGML_OP_CONV_2D_DW:
case GGML_OP_POOL_2D:
case GGML_OP_RWKV_WKV6:
case GGML_OP_RWKV_WKV7:
View file

@@ -0,0 +1,105 @@
#version 450
#include "types.comp"
layout (push_constant) uniform parameter
{
uint ne;
uint batches;
uint channels;
uint dst_w;
uint dst_h;
uint src_w;
uint src_h;
uint knl_w;
uint knl_h;
int stride_x;
int stride_y;
int pad_x;
int pad_y;
int dilation_x;
int dilation_y;
} p;
layout (binding = 0) readonly buffer A {A_TYPE knl_data[];};
layout (binding = 1) readonly buffer B {B_TYPE src_data[];};
layout (binding = 2) writeonly buffer D {D_TYPE dst_data[];};
layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;
FLOAT_TYPE conv_2d_dw_whcn(uint idx) {
uint i0 = idx / p.dst_w;
uint dst_x = idx - i0 * p.dst_w;
uint i1 = i0 / p.dst_h;
uint dst_y = i0 - i1 * p.dst_h;
uint n = i1 / p.channels;
uint c = i1 - n * p.channels;
uint src_i = n * p.channels * p.src_h * p.src_w + c * p.src_h * p.src_w;
uint knl_i = c * p.knl_h * p.knl_w;
FLOAT_TYPE sum = 0.0;
for (uint knl_y = 0; knl_y < p.knl_h; ++knl_y) {
uint src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y;
if (src_y >= p.src_h) { // src_y < 0 will wrap to a large unsigned int
continue;
}
for (uint knl_x = 0; knl_x < p.knl_w; ++knl_x) {
uint src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x;
if (src_x >= p.src_w) { // src_x < 0 will wrap to a large unsigned int
continue;
}
FLOAT_TYPE v = FLOAT_TYPE(src_data[src_i + src_y * p.src_w + src_x]);
FLOAT_TYPE k = FLOAT_TYPE(knl_data[knl_i + knl_y * p.knl_w + knl_x]);
sum = fma(v, k, sum);
}
}
return sum;
}
FLOAT_TYPE conv_2d_dw_cwhn(uint idx) {
uint i0 = idx / p.channels;
uint c = idx - i0 * p.channels;
uint i1 = i0 / p.dst_w;
uint dst_x = i0 - i1 * p.dst_w;
uint n = i1 / p.dst_h;
uint dst_y = i1 - n * p.dst_h;
uint src_i = n * p.channels * p.src_h * p.src_w;
uint src_row = p.src_w * p.channels;
uint knl_row = p.knl_w * p.channels;
FLOAT_TYPE sum = 0.0;
for (uint knl_y = 0; knl_y < p.knl_h; ++knl_y) {
uint src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y;
if (src_y >= p.src_h) { // src_y < 0 will wrap to a large unsigned int
continue;
}
for (uint knl_x = 0; knl_x < p.knl_w; ++knl_x) {
uint src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x;
if (src_x >= p.src_w) { // src_x < 0 will wrap to a large unsigned int
continue;
}
FLOAT_TYPE v = FLOAT_TYPE(src_data[src_i + src_y * src_row + src_x * p.channels + c]);
FLOAT_TYPE k = FLOAT_TYPE(knl_data[ knl_y * knl_row + knl_x * p.channels + c]);
sum = fma(v, k, sum);
}
}
return sum;
}
void main() {
uint idx = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x;
if (idx >= p.ne) {
return;
}
FLOAT_TYPE result =
#ifdef WHCN
conv_2d_dw_whcn(idx);
#else
conv_2d_dw_cwhn(idx);
#endif
dst_data[idx] = D_TYPE(result);
}
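A NumPy reference of the WHCN variant above, using the same index math but explicit bounds checks instead of relying on unsigned wrap-around. This is a sketch for checking the shader's indexing, with symmetric padding assumed as in pad_x/pad_y:

import numpy as np

def conv2d_dw_whcn(src, knl, stride, pad, dilation):
    n, c, src_h, src_w = src.shape      # batches, channels, height, width
    kc, knl_h, knl_w = knl.shape        # one kernel per channel (kc == c)
    dst_h = (src_h + 2 * pad - dilation * (knl_h - 1) - 1) // stride + 1
    dst_w = (src_w + 2 * pad - dilation * (knl_w - 1) - 1) // stride + 1
    dst = np.zeros((n, c, dst_h, dst_w), dtype=src.dtype)
    for knl_y in range(knl_h):
        for knl_x in range(knl_w):
            for dst_y in range(dst_h):
                src_y = dst_y * stride + knl_y * dilation - pad
                if not 0 <= src_y < src_h:
                    continue
                for dst_x in range(dst_w):
                    src_x = dst_x * stride + knl_x * dilation - pad
                    if not 0 <= src_x < src_w:
                        continue
                    dst[:, :, dst_y, dst_x] += src[:, :, src_y, src_x] * knl[:, knl_y, knl_x]
    return dst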

View file

@@ -598,6 +598,9 @@ void process_shaders() {
string_to_spv("opt_step_adamw_f32", "opt_step_adamw.comp", merge_maps(base_dict, {{"A_TYPE", "float"}}));
string_to_spv("conv2d_dw_whcn_f32", "conv2d_dw.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"WHCN", "1"}}));
string_to_spv("conv2d_dw_cwhn_f32", "conv2d_dw.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"CWHN", "1"}}));
for (auto &c : compiles) {
c.wait();
}

View file

@@ -234,6 +234,7 @@ class Keys:
SPATIAL_MERGE_SIZE = "clip.vision.spatial_merge_size"
USE_GELU = "clip.use_gelu"
USE_SILU = "clip.use_silu"
N_WA_PATTERN = "clip.vision.n_wa_pattern" # used by qwen2.5vl
class Attention:
HEAD_COUNT = "clip.vision.attention.head_count"
@@ -2032,6 +2033,8 @@ class PoolingType(IntEnum):
NONE = 0
MEAN = 1
CLS = 2
LAST = 3
RANK = 4
class GGMLQuantizationType(IntEnum):
@@ -2162,6 +2165,8 @@ class VisionProjectorType:
GEMMA3 = "gemma3"
IDEFICS3 = "idefics3"
PIXTRAL = "pixtral"
QWEN2VL = "qwen2vl_merger"
QWEN25VL = "qwen2.5vl_merger"
# Items here are (block size, type size)

View file

@@ -984,6 +984,9 @@ class GGUFWriter:
def add_vision_projector_scale_factor(self, value: int) -> None:
self.add_uint32(Keys.ClipVision.Projector.SCALE_FACTOR, value)
def add_vision_n_wa_pattern(self, value: int) -> None:
self.add_uint32(Keys.ClipVision.N_WA_PATTERN, value)
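Hypothetical usage of the new writer method, following the GGUFWriter(path=..., arch="clip") pattern used by the removed qwen2_vl surgery script; the remaining metadata, tensors and write_*_to_file() calls are omitted here:

from gguf import GGUFWriter

w = GGUFWriter(path="vision.gguf", arch="clip")
w.add_vision_n_wa_pattern(8)   # e.g. Qwen2.5-VL style: one full-attention block every 8 blocks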
def _pack(self, fmt: str, value: Any, skip_pack_prefix: bool = False) -> bytes:
pack_prefix = ''
if not skip_pack_prefix:

View file

@@ -896,6 +896,7 @@ class TensorNameMap:
MODEL_TENSOR.V_MMPROJ: (
"multi_modal_projector.linear_{bid}",
"visual.merger.mlp.{bid}", # qwen2vl
),
MODEL_TENSOR.V_MMPROJ_FC: (
@@ -919,6 +920,7 @@ class TensorNameMap:
"vpm.embeddings.patch_embedding",
"model.vision_model.embeddings.patch_embedding", # SmolVLM
"vision_tower.patch_conv", # pixtral
"visual.patch_embed.proj", # qwen2vl
),
MODEL_TENSOR.V_ENC_EMBD_POS: (
@@ -932,6 +934,7 @@ class TensorNameMap:
"vpm.encoder.layers.{bid}.self_attn.q_proj",
"model.vision_model.encoder.layers.{bid}.self_attn.q_proj", # SmolVLM
"vision_tower.transformer.layers.{bid}.attention.q_proj", # pixtral
"visual.blocks.{bid}.attn.q", # qwen2vl, generated
),
MODEL_TENSOR.V_ENC_ATTN_K: (
@@ -939,6 +942,7 @@ class TensorNameMap:
"vpm.encoder.layers.{bid}.self_attn.k_proj",
"model.vision_model.encoder.layers.{bid}.self_attn.k_proj", # SmolVLM
"vision_tower.transformer.layers.{bid}.attention.k_proj", # pixtral
"visual.blocks.{bid}.attn.k", # qwen2vl, generated
),
MODEL_TENSOR.V_ENC_ATTN_V: (
@@ -946,6 +950,7 @@ class TensorNameMap:
"vpm.encoder.layers.{bid}.self_attn.v_proj",
"model.vision_model.encoder.layers.{bid}.self_attn.v_proj", # SmolVLM
"vision_tower.transformer.layers.{bid}.attention.v_proj", # pixtral
"visual.blocks.{bid}.attn.v", # qwen2vl, generated
),
MODEL_TENSOR.V_ENC_INPUT_NORM: (
@@ -953,6 +958,7 @@ class TensorNameMap:
"vpm.encoder.layers.{bid}.layer_norm1",
"model.vision_model.encoder.layers.{bid}.layer_norm1", # SmolVLM
"vision_tower.transformer.layers.{bid}.attention_norm", # pixtral
"visual.blocks.{bid}.norm1", # qwen2vl
),
MODEL_TENSOR.V_ENC_OUTPUT: (
@@ -960,6 +966,7 @@ class TensorNameMap:
"vpm.encoder.layers.{bid}.self_attn.out_proj",
"model.vision_model.encoder.layers.{bid}.self_attn.out_proj", # SmolVLM
"vision_tower.transformer.layers.{bid}.attention.o_proj", # pixtral
"visual.blocks.{bid}.attn.proj", # qwen2vl
),
MODEL_TENSOR.V_ENC_OUTPUT_NORM: (
@@ -967,17 +974,24 @@ class TensorNameMap:
"vpm.encoder.layers.{bid}.layer_norm2",
"model.vision_model.encoder.layers.{bid}.layer_norm2", # SmolVLM
"vision_tower.transformer.layers.{bid}.ffn_norm", # pixtral
"visual.blocks.{bid}.norm2", # qwen2vl
),
# some namings are messed up because the original llava code swapped fc1 and fc2
# we have no better way to fix it, just be careful
# new models like pixtral use the correct naming
MODEL_TENSOR.V_ENC_FFN_UP: (
"vision_tower.vision_model.encoder.layers.{bid}.mlp.fc1",
"vpm.encoder.layers.{bid}.mlp.fc1",
"model.vision_model.encoder.layers.{bid}.mlp.fc2", # SmolVLM, gemma3 (note: name is swapped)
"vision_tower.transformer.layers.{bid}.feed_forward.up_proj", # pixtral
"visual.blocks.{bid}.mlp.fc2", # qwen2vl
"visual.blocks.{bid}.mlp.up_proj", # qwen2.5vl
),
MODEL_TENSOR.V_ENC_FFN_GATE: (
"vision_tower.transformer.layers.{bid}.feed_forward.gate_proj", # pixtral
"visual.blocks.{bid}.mlp.gate_proj", # qwen2.5vl
),
MODEL_TENSOR.V_ENC_FFN_DOWN: (
@@ -985,6 +999,8 @@ class TensorNameMap:
"vpm.encoder.layers.{bid}.mlp.fc2",
"model.vision_model.encoder.layers.{bid}.mlp.fc1", # SmolVLM, gemma3 (note: name is swapped)
"vision_tower.transformer.layers.{bid}.feed_forward.down_proj", # pixtral
"visual.blocks.{bid}.mlp.fc1", # qwen2vl
"visual.blocks.{bid}.mlp.down_proj", # qwen2.5vl
),
MODEL_TENSOR.V_PRE_NORM: (
@@ -995,6 +1011,7 @@ class TensorNameMap:
MODEL_TENSOR.V_POST_NORM: (
"vision_tower.vision_model.post_layernorm",
"model.vision_model.post_layernorm", # SmolVLM
"visual.merger.ln_q", # qwen2vl
),
MODEL_TENSOR.V_MM_INP_PROJ: (
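The new qwen2vl entries can be exercised through the usual gguf-py lookup helper. A hedged sketch; the exact mapped string depends on the CLIP_VISION tensor names defined elsewhere in gguf-py, so it is only printed rather than asserted:

import gguf

tmap = gguf.get_tensor_name_map(gguf.MODEL_ARCH.CLIP_VISION, 32)
print(tmap.get_name("visual.blocks.0.norm1.weight", try_suffixes=(".weight", ".bias")))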

View file

@@ -189,7 +189,7 @@ llama_ubatch llama_sbatch::split_seq(size_t n_ubatch) {
return ubatch;
}
void llama_sbatch::from_batch(const llama_batch & batch, size_t n_embd, bool simple_split, bool logits_all) {
llama_sbatch::llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split, bool logits_all) {
GGML_ASSERT(batch.n_tokens >= 0);
this->batch = &batch;
this->n_embd = n_embd;
@@ -203,6 +203,7 @@ void llama_sbatch::from_batch(const llama_batch & batch, size_t n_embd, bool sim
for (size_t i = 0; i < n_tokens; ++i) {
ids[i] = i;
}
if (simple_split) {
seq.resize(1);
llama_sbatch_seq & s = seq[0];
@@ -212,6 +213,7 @@ void llama_sbatch::from_batch(const llama_batch & batch, size_t n_embd, bool sim
s.length = n_tokens;
return;
}
std::sort(ids.begin(), ids.end(),
[&batch](size_t a, size_t b) {
int32_t n_seq_a = batch.n_seq_id ? batch.n_seq_id[a] : 1;
@@ -239,6 +241,7 @@ void llama_sbatch::from_batch(const llama_batch & batch, size_t n_embd, bool sim
return n_seq_a > n_seq_b;
}
);
// init seq
llama_sbatch_seq * last_seq = nullptr;
@@ -262,6 +265,7 @@ void llama_sbatch::from_batch(const llama_batch & batch, size_t n_embd, bool sim
seq.push_back(new_seq);
last_seq = &seq.back();
}
// keep shared prompts first at the end, then sort by length descending.
std::sort(seq.begin(), seq.end(),
[](llama_sbatch_seq & a, llama_sbatch_seq & b) {

View file

@@ -70,7 +70,8 @@ struct llama_sbatch {
// sequence-wise split
llama_ubatch split_seq(size_t n_ubatch);
void from_batch(const llama_batch & batch, size_t n_embd, bool simple_split = false, bool logits_all = false);
llama_sbatch() = default;
llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split = false, bool logits_all = false);
};
// temporary allocate memory for the input batch if needed

View file

@ -6,11 +6,9 @@
#include "llama-model.h" #include "llama-model.h"
#include "llama-kv-cache.h" #include "llama-kv-cache.h"
#include <cassert>
#include <cstring> #include <cstring>
#include <stdexcept> #include <stdexcept>
#include <cinttypes> #include <cinttypes>
#include <cmath>
// //
// llama_context // llama_context
@@ -177,44 +175,13 @@ llama_context::llama_context(
    }

    // init the memory module
-   // TODO: for now, always create a unified KV cache
    if (!hparams.vocab_only) {
-       kv_self.reset(static_cast<llama_kv_cache_unified *>(model.create_memory()));
-       LLAMA_LOG_DEBUG("%s: n_ctx = %u\n", __func__, cparams.n_ctx);
-       cparams.n_ctx = GGML_PAD(cparams.n_ctx, kv_self->get_padding(cparams));
-       LLAMA_LOG_DEBUG("%s: n_ctx = %u (padded)\n", __func__, cparams.n_ctx);
-       uint32_t kv_size = cparams.n_ctx;
-       ggml_type type_k = params.type_k;
-       ggml_type type_v = params.type_v;
-       if (llama_model_is_recurrent(&model)) {
-           // Mamba needs at least as many KV cells as there are sequences kept at any time
-           kv_size = std::max((uint32_t) 1, params.n_seq_max);
-           // it's probably best to keep as much precision as possible for the states
-           type_k = GGML_TYPE_F32; // required by ggml_ssm_conv for Mamba's conv_states
-           type_v = GGML_TYPE_F32; // required by ggml_ssm_scan for Mamba's ssm_states
-       }
-       GGML_ASSERT(hparams.n_embd_head_k % ggml_blck_size(type_k) == 0);
-       GGML_ASSERT(hparams.n_embd_head_v % ggml_blck_size(type_v) == 0);
-       if (!kv_self->init(model, cparams, type_k, type_v, kv_size, cparams.offload_kqv)) {
-           throw std::runtime_error("failed to initialize self-attention cache");
-       }
-       {
-           const size_t memory_size_k = kv_self->size_k_bytes();
-           const size_t memory_size_v = kv_self->size_v_bytes();
-           LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
-                 (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
-                 ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
-                 ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
-       }
+       llama_memory_params params_mem = {
+           /*.type_k =*/ params.type_k,
+           /*.type_v =*/ params.type_v,
+       };
+
+       memory.reset(model.create_memory(params_mem, cparams));
    }

    // init backends
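The sizing and type-selection logic removed here moves behind llama_model::create_memory. A hypothetical sketch of what the non-recurrent branch could look like, reconstructed from the removed lines above (the actual default branch is outside the visible part of this diff; argument order follows the llama_kv_cache_unified declaration further down):

    // sketch only - reconstructed from the removed initialization code above
    const uint32_t padding = llama_kv_cache_unified::get_padding(cparams);

    cparams.n_ctx = GGML_PAD(cparams.n_ctx, padding);

    res = new llama_kv_cache_unified(
            *this,                // const llama_model &
            params.type_k,
            params.type_v,
            !cparams.flash_attn,  // v_trans: V is kept transposed unless flash attention is used
            cparams.offload_kqv,
            cparams.n_ctx,        // kv_size
            padding);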
@@ -305,7 +272,9 @@ llama_context::llama_context(
    int n_nodes_tg = -1;

    // simulate full KV cache
-   kv_self->n = kv_self->size;
+   llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
+   kv_self->set_full();

    cross.v_embd.clear();
@@ -427,6 +396,18 @@ const llama_model & llama_context::get_model() const {
    return model;
}

+const llama_cparams & llama_context::get_cparams() const {
+    return cparams;
+}
+
+ggml_backend_sched_t llama_context::get_sched() const {
+    return sched.get();
+}
+
+ggml_context * llama_context::get_ctx_compute() const {
+    return ctx_compute.get();
+}
+
uint32_t llama_context::n_ctx() const {
    return cparams.n_ctx;
}
@@ -456,338 +437,21 @@ uint32_t llama_context::n_threads_batch() const {
}

llama_kv_cache * llama_context::get_kv_self() {
-   return kv_self.get();
+   llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
+   return kv_self;
}

const llama_kv_cache * llama_context::get_kv_self() const {
-   return kv_self.get();
-}
+   llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
+   return kv_self;
ggml_tensor * llama_context::build_rope_shift(
ggml_context * ctx0,
ggml_tensor * cur,
ggml_tensor * shift,
ggml_tensor * factors,
float freq_base,
float freq_scale) const {
const auto & n_ctx_orig = cparams.n_ctx_orig_yarn;
const auto & yarn_ext_factor = cparams.yarn_ext_factor;
const auto & yarn_beta_fast = cparams.yarn_beta_fast;
const auto & yarn_beta_slow = cparams.yarn_beta_slow;
const auto & hparams = model.hparams;
const auto & n_rot = hparams.n_rot;
const auto & rope_type = hparams.rope_type;
// See llm_build_deepseek2() for why attn_factor has to be scaled for YaRN RoPE to work correctly.
// See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation.
const float yarn_attn_factor = model.arch == LLM_ARCH_DEEPSEEK2 ? 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)) : cparams.yarn_attn_factor;
ggml_tensor * tmp;
if (ggml_is_quantized(cur->type)) {
// dequantize to f32 -> RoPE -> quantize back
tmp = ggml_cast(ctx0, cur, GGML_TYPE_F32);
tmp = ggml_rope_ext(ctx0, tmp,
shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow);
tmp = ggml_cpy(ctx0, tmp, cur);
} else {
// we rotate only the first n_rot dimensions
tmp = ggml_rope_ext_inplace(ctx0, cur,
shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow);
}
return tmp;
}
class llm_graph_input_k_shift : public llm_graph_input_i {
public:
llm_graph_input_k_shift(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {}
virtual ~llm_graph_input_k_shift() = default;
void set_input(const llama_ubatch * ubatch) override;
ggml_tensor * k_shift; // I32 [kv_size]
const llama_kv_cache_unified * kv_self;
};
void llm_graph_input_k_shift::set_input(const llama_ubatch * ubatch) {
GGML_UNUSED(ubatch);
if (k_shift) {
assert(ggml_backend_buffer_is_host(k_shift->buffer));
int32_t * data = (int32_t *) k_shift->data;
for (uint32_t i = 0; i < kv_self->size; ++i) {
data[i] = kv_self->cells[i].delta;
}
}
}
llm_graph_result_ptr llama_context::build_kv_self_shift(
ggml_context * ctx0,
ggml_cgraph * gf) const {
auto res = std::make_unique<llm_graph_result>();
const auto & hparams = model.hparams;
const auto & n_layer = hparams.n_layer;
const auto & n_embd_head_k = hparams.n_embd_head_k;
//const auto & n_embd_head_v = hparams.n_embd_head_v;
//GGML_ASSERT(kv_self->size == n_ctx);
auto inp = std::make_unique<llm_graph_input_k_shift>(kv_self.get());
inp->k_shift = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, cparams.n_ctx);
ggml_set_input(inp->k_shift);
for (uint32_t il = 0; il < n_layer; ++il) {
const int64_t n_head_kv = hparams.n_head_kv(il);
const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
const bool is_swa = hparams.is_swa(il);
// note: the swa rope params could become part of the cparams in the future
// if we decide to make them configurable, like the non-sliding ones
const float freq_base_l = is_swa ? hparams.rope_freq_base_train_swa : cparams.rope_freq_base;
const float freq_scale_l = is_swa ? hparams.rope_freq_scale_train_swa : cparams.rope_freq_scale;
ggml_tensor * rope_factors = kv_self->cbs.get_rope_factors(n_ctx_per_seq(), il);
ggml_tensor * k =
ggml_view_3d(ctx0, kv_self->k_l[il],
n_embd_head_k, n_head_kv, kv_self->size,
ggml_row_size(kv_self->k_l[il]->type, n_embd_head_k),
ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa),
0);
ggml_tensor * cur = build_rope_shift(ctx0, k, inp->k_shift, rope_factors, freq_base_l, freq_scale_l);
ggml_build_forward_expand(gf, cur);
}
res->add_input(std::move(inp));
return res;
}
llm_graph_result_ptr llama_context::build_kv_self_defrag(
ggml_context * ctx0,
ggml_cgraph * gf) const {
auto res = std::make_unique<llm_graph_result>();
const auto & hparams = model.hparams;
const auto & ids = kv_self->defrag_info.ids;
#if 0
// CPU defrag
//
// TODO: optimizations are possible:
// - multiple threads
// - avoid copying to the host memory when already there
//
// likely not worth the effort, as we have ggml_graph based defrag
//
const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa();
const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa();
const uint32_t kv_size = size;
std::vector<uint8_t> buf_k;
std::vector<uint8_t> buf_v;
for (uint32_t il = 0; il < n_layer; ++il) {
const size_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa);
const size_t k_size = ggml_row_size(k_l[il]->type, n_embd_k_gqa*kv_size);
const size_t v_size_el = ggml_type_size(v_l[il]->type);
const size_t v_size = ggml_row_size (v_l[il]->type, n_embd_v_gqa*kv_size);
buf_k.resize(k_size);
buf_v.resize(v_size);
ggml_backend_tensor_get(k_l[il], buf_k.data(), 0, buf_k.size());
ggml_backend_tensor_get(v_l[il], buf_v.data(), 0, buf_v.size());
// batch move [i, i+nm) to [id, id+nm)
// note: cells can move only to a lower index
for (uint32_t i = 0; i < n_kv; ++i) {
const uint32_t id = ids[i];
if (i == id || id == n_kv) {
continue;
}
uint32_t nm = 1;
while (i + nm < n_kv && ids[i + nm] == id + nm) {
nm++;
}
// move keys
{
const int64_t os = i*k_size_row;
const int64_t od = id*k_size_row;
memcpy(buf_k.data() + od, buf_k.data() + os, nm*k_size_row);
}
// move values (note: they are transposed)
{
const int64_t os = i;
const int64_t od = id;
for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
memcpy(buf_v.data() + (od + j*kv_size)*v_size_el, buf_v.data() + (os + j*kv_size)*v_size_el, nm*v_size_el);
}
}
i += nm - 1;
}
ggml_backend_tensor_set(k_l[il], buf_k.data(), 0, buf_k.size());
ggml_backend_tensor_set(v_l[il], buf_v.data(), 0, buf_v.size());
}
#else
for (uint32_t i = 0; i < ids.size(); ++i) {
const uint32_t id = ids[i];
if (i == id || id == ids.size()) {
continue;
}
uint32_t nm = 1;
while (i + nm < ids.size() && ids[i + nm] == id + nm) {
nm++;
}
for (uint32_t il = 0; il < hparams.n_layer; ++il) { // NOLINT
const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
ggml_tensor * view_k_src = ggml_view_2d(ctx0, kv_self->k_l[il],
n_embd_k_gqa, nm,
ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa),
ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa*i));
ggml_tensor * view_k_dst = ggml_view_2d(ctx0, kv_self->k_l[il],
n_embd_k_gqa, nm,
ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa),
ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa*id));
ggml_tensor * view_v_src;
ggml_tensor * view_v_dst;
if (cparams.flash_attn) {
// NOTE: the V cache is not transposed when using flash attention
view_v_src = ggml_view_2d(ctx0, kv_self->v_l[il],
n_embd_v_gqa, nm,
ggml_row_size(kv_self->v_l[il]->type, n_embd_v_gqa),
ggml_row_size(kv_self->v_l[il]->type, n_embd_v_gqa*i));
view_v_dst = ggml_view_2d(ctx0, kv_self->v_l[il],
n_embd_v_gqa, nm,
ggml_row_size(kv_self->v_l[il]->type, n_embd_v_gqa),
ggml_row_size(kv_self->v_l[il]->type, n_embd_v_gqa*id));
} else {
view_v_src = ggml_view_2d(ctx0, kv_self->v_l[il],
nm, n_embd_v_gqa,
ggml_row_size(kv_self->v_l[il]->type, kv_self->size),
ggml_row_size(kv_self->v_l[il]->type, i));
view_v_dst = ggml_view_2d(ctx0, kv_self->v_l[il],
nm, n_embd_v_gqa,
ggml_row_size(kv_self->v_l[il]->type, kv_self->size),
ggml_row_size(kv_self->v_l[il]->type, id));
}
ggml_build_forward_expand(gf, ggml_cpy(ctx0, view_k_src, view_k_dst));
ggml_build_forward_expand(gf, ggml_cpy(ctx0, view_v_src, view_v_dst));
}
i += nm - 1;
}
//LLAMA_LOG_INFO("gf->n_nodes = %d\n", gf->n_nodes);
#endif
return res;
} }
void llama_context::kv_self_update() {
-   auto & kv = kv_self;
    bool need_reserve = false;

-   if (kv->has_shift) {
-       if (!kv->get_can_shift()) {
-           printf("\nWARNING: The current context does not support K-shift!\n");
-       } else {
-           // LLAMA_LOG_DEBUG("%s: applying K-shift\n", __func__);
+   llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
+   need_reserve = kv_self->update(*this);
// apply K-shift if needed
if (model.hparams.rope_type != LLAMA_ROPE_TYPE_NONE) {
ggml_backend_sched_reset(sched.get());
auto * gf = graph_init();
auto res = build_kv_self_shift(ctx_compute.get(), gf);
ggml_backend_sched_alloc_graph(sched.get(), gf);
res->set_inputs(nullptr);
graph_compute(gf, false);
need_reserve = true;
}
{
kv->has_shift = false;
for (uint32_t i = 0; i < kv->size; ++i) {
kv->cells[i].delta = 0;
}
}
}
}
// defragment the KV cache if needed
if (kv->do_defrag) {
LLAMA_LOG_DEBUG("%s: defragmenting KV cache\n", __func__);
if (kv->defrag_prepare(graph_max_nodes())) {
ggml_backend_sched_reset(sched.get());
auto * gf = graph_init();
auto res = build_kv_self_defrag(ctx_compute.get(), gf);
ggml_backend_sched_alloc_graph(sched.get(), gf);
res->set_inputs(nullptr);
graph_compute(gf, false);
need_reserve = true;
}
kv->do_defrag = false;
}
// reserve a worst case graph if needed // reserve a worst case graph if needed
if (need_reserve) { if (need_reserve) {
@@ -798,7 +462,7 @@ void llama_context::kv_self_update() {
        uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);

        // simulate full KV cache
-       kv_self->n = kv_self->size;
+       kv_self->set_full();

        llama_token token = model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
        llama_ubatch ubatch = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr};
@@ -819,9 +483,6 @@ enum llama_pooling_type llama_context::pooling_type() const {
}

float * llama_context::get_logits() {
-   // reorder logits for backward compatibility
-   output_reorder();
    return logits;
}

@@ -864,9 +525,6 @@ float * llama_context::get_logits_ith(int32_t i) {
}

float * llama_context::get_embeddings() {
-   // reorder embeddings for backward compatibility
-   output_reorder();
    return embd;
}
@@ -1018,8 +676,8 @@ int llama_context::encode(llama_batch & inp_batch) {
    }

    // temporary allocate memory for the input batch if needed
-   // TODO: this is incorrect for multiple sequences because pos_max() is the maximum across all sequences
-   llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : kv_self->pos_max() + 1);
+   // note: during encode, we always pass the full sequence starting from pos = 0
+   llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : 0);

    const llama_batch & batch = batch_allocr.batch;
    const int32_t n_tokens = batch.n_tokens;

@@ -1048,7 +706,7 @@ int llama_context::encode(llama_batch & inp_batch) {
    const int64_t n_embd = hparams.n_embd;

-   sbatch.from_batch(batch, n_embd, /* simple_split */ true, /* logits_all */ true);
+   llama_sbatch sbatch = llama_sbatch(batch, n_embd, /* simple_split */ true, /* logits_all */ true);

    const llama_ubatch ubatch = sbatch.split_simple(n_tokens);
@@ -1182,9 +840,11 @@ int llama_context::decode(llama_batch & inp_batch) {
        return -1;
    }

+   llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
+
    // temporary allocate memory for the input batch if needed
-   // TODO: this is incorrect for multiple sequences because pos_max() is the maximum across all sequences
-   llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : kv_self->pos_max() + 1);
+   // TODO: this is incorrect for multiple sequences because get_pos_max() is the maximum across all sequences
+   llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : kv_self->get_pos_max() + 1);

    const llama_batch & batch = batch_allocr.batch;

@@ -1196,7 +856,7 @@ int llama_context::decode(llama_batch & inp_batch) {
    const int64_t n_tokens_all = batch.n_tokens;
    const int64_t n_embd = hparams.n_embd;

-   llama_kv_cache_guard kv_guard(kv_self.get());
+   llama_kv_cache_guard kv_guard(kv_self);

    GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT

@@ -1237,11 +897,7 @@ int llama_context::decode(llama_batch & inp_batch) {
        n_outputs_all = 1;
    }

-   const bool logits_all = n_outputs_all == n_tokens_all;
-   sbatch.from_batch(batch, n_embd,
-           /* simple_split */ !kv_self->recurrent,
-           /* logits_all */ logits_all);
+   llama_sbatch sbatch = kv_self->sbatch_init(batch, /* logits_all */ n_outputs_all == n_tokens_all);

    // reserve output buffer
    if (output_reserve(n_outputs_all) < n_outputs_all) {
@@ -1255,22 +911,7 @@ int llama_context::decode(llama_batch & inp_batch) {
    int64_t n_outputs_prev = 0;

    while (sbatch.n_tokens > 0) {
-       llama_ubatch ubatch = llama_ubatch();
-       const auto & n_ubatch = cparams.n_ubatch;
-       if (kv_self->recurrent) {
-           if (embd_pooled) {
-               // Pooled embeddings cannot be split across ubatches (yet)
-               ubatch = sbatch.split_seq(cparams.n_ubatch);
-           } else {
-               // recurrent model architectures are easier to implement
-               // with equal-length sequences
-               ubatch = sbatch.split_equal(cparams.n_ubatch);
-           }
-       } else {
-           ubatch = sbatch.split_simple(n_ubatch);
-       }
+       llama_ubatch ubatch = kv_self->ubatch_next(sbatch, cparams.n_ubatch, embd_pooled);

        // count the outputs in this u_batch
        {
@@ -1290,24 +931,12 @@ int llama_context::decode(llama_batch & inp_batch) {
        }

        // find KV slot
-       {
        if (!kv_self->find_slot(ubatch)) {
            LLAMA_LOG_WARN("%s: failed to find KV cache slot for ubatch of size %d\n", __func__, ubatch.n_tokens);
            return 1;
        }
-           if (!kv_self->recurrent) {
-               // a heuristic, to avoid attending the full cache if it is not yet utilized
-               // after enough generations, the benefit from this heuristic disappears
-               // if we start defragmenting the cache, the benefit from this will be more important
-               const uint32_t pad = kv_self->get_padding(cparams);
-               kv_self->n = std::min(kv_self->size, std::max(pad, GGML_PAD(kv_self->cell_max(), pad)));
-           }
-       }
-       //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self->n, kv_self->used, kv_self->head);

        ggml_backend_sched_reset(sched.get());
        ggml_backend_sched_set_eval_callback(sched.get(), cparams.cb_eval, cparams.cb_eval_user_data);
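The per-cache splitting strategy removed from decode() a couple of hunks above now sits behind ubatch_next. A plausible sketch of the recurrent implementation, inferred from that removed branching rather than copied from llama-kv-cache.cpp (whose diff is not visible here); the unified cache would simply return sbatch.split_simple(n_ubatch):

    llama_ubatch llama_kv_cache_recurrent::ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const {
        if (embd_pooled) {
            // pooled embeddings cannot be split across ubatches (yet)
            return sbatch.split_seq(n_ubatch);
        }
        // recurrent model architectures are easier to implement with equal-length sequences
        return sbatch.split_equal(n_ubatch);
    }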
@@ -1421,43 +1050,68 @@ int llama_context::decode(llama_batch & inp_batch) {
    // finalize the batch processing
    kv_guard.commit();

+   // set to total number of outputs in the batch, for use in llama_get_logits_ith
+   n_outputs = n_outputs_all;
+
    // set output mappings
    {
        bool sorted_output = true;

-       GGML_ASSERT(sbatch.out_ids.size() == (size_t) n_outputs_all);
+       auto & out_ids = sbatch.out_ids;
+
+       GGML_ASSERT(out_ids.size() == (size_t) n_outputs_all);

        for (int64_t i = 0; i < n_outputs_all; ++i) {
-           int64_t out_id = sbatch.out_ids[i];
+           int64_t out_id = out_ids[i];
            output_ids[out_id] = i;
            if (out_id != i) {
                sorted_output = false;
            }
        }

-       if (sorted_output) {
-           sbatch.out_ids.clear();
-       }
-   }
-   // set to total number of outputs in the batch, for use in llama_get_logits_ith
-   n_outputs = n_outputs_all;
+       // make the outputs have the same order they had in the user-provided batch
+       // note: this is mostly relevant for recurrent models atm
+       if (!sorted_output) {
+           const uint32_t n_vocab = model.vocab.n_tokens();
+           const uint32_t n_embd  = model.hparams.n_embd;
+
+           GGML_ASSERT((size_t) n_outputs == out_ids.size());
// TODO: is there something more efficient which also minimizes swaps?
// selection sort, to minimize swaps (from https://en.wikipedia.org/wiki/Selection_sort)
for (int32_t i = 0; i < n_outputs - 1; ++i) {
int32_t j_min = i;
for (int32_t j = i + 1; j < n_outputs; ++j) {
if (out_ids[j] < out_ids[j_min]) {
j_min = j;
}
}
if (j_min == i) { continue; }
std::swap(out_ids[i], out_ids[j_min]);
if (logits_size > 0) {
for (uint32_t k = 0; k < n_vocab; k++) {
std::swap(logits[i*n_vocab + k], logits[j_min*n_vocab + k]);
}
}
if (embd_size > 0) {
for (uint32_t k = 0; k < n_embd; k++) {
std::swap(embd[i*n_embd + k], embd[j_min*n_embd + k]);
}
}
}
std::fill(output_ids.begin(), output_ids.end(), -1);
for (int32_t i = 0; i < n_outputs; ++i) {
output_ids[out_ids[i]] = i;
}
}
}
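To make the reorder above concrete, here is a small self-contained toy with one float of "logits" per output: out_ids records which batch position produced each stored row, the selection sort puts the rows back into batch order, and output_ids ends up mapping batch position to row index. This mirrors the loop above; it is not code from the commit.

    #include <cstdio>
    #include <utility>
    #include <vector>

    int main() {
        std::vector<int>   out_ids = {2, 0, 1};          // batch position that produced each row
        std::vector<float> logits  = {0.2f, 0.0f, 0.1f}; // row i holds the logit of token out_ids[i]
        std::vector<int>   output_ids(out_ids.size(), -1);

        const int n_outputs = (int) out_ids.size();
        for (int i = 0; i < n_outputs - 1; ++i) {
            int j_min = i;
            for (int j = i + 1; j < n_outputs; ++j) {
                if (out_ids[j] < out_ids[j_min]) {
                    j_min = j;
                }
            }
            if (j_min == i) {
                continue;
            }
            std::swap(out_ids[i], out_ids[j_min]);
            std::swap(logits[i], logits[j_min]);
        }
        for (int i = 0; i < n_outputs; ++i) {
            output_ids[out_ids[i]] = i;
        }
        // prints: 0 -> 0.0, 1 -> 0.1, 2 -> 0.2, i.e. the user batch order is restored
        for (int i = 0; i < n_outputs; ++i) {
            printf("%d -> %.1f\n", i, logits[output_ids[i]]);
        }
        return 0;
    }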
    // wait for the computation to finish (automatically done when obtaining the model output)
    //synchronize();

    // decide if we need to defrag the kv cache
-   if (cparams.causal_attn && cparams.defrag_thold > 0.0f) {
-       // - do not defrag small contexts (i.e. < 2048 tokens)
-       // - count the padding towards the number of used tokens
-       const float fragmentation = kv_self->n >= 2048 ? std::max(0.0f, 1.0f - float(kv_self->used + kv_self->get_padding(cparams))/float(kv_self->n)) : 0.0f;
-       // queue defragmentation for next llama_kv_cache_update
-       if (fragmentation > cparams.defrag_thold) {
-           LLAMA_LOG_DEBUG("%s: fragmentation: %.2f - requesting defrag\n", __func__, fragmentation);
-           kv_self->defrag();
-       }
+   if (cparams.defrag_thold > 0.0f) {
+       kv_self->defrag_sched(cparams.defrag_thold);
    }
// Reset state for the next token before backend sync, to allow the CPU activities in the reset to // Reset state for the next token before backend sync, to allow the CPU activities in the reset to
@ -1543,44 +1197,6 @@ int32_t llama_context::output_reserve(int32_t n_outputs) {
return n_outputs_max; return n_outputs_max;
} }
void llama_context::output_reorder() {
auto & out_ids = sbatch.out_ids;
if (!out_ids.empty()) {
const uint32_t n_vocab = model.vocab.n_tokens();
const uint32_t n_embd = model.hparams.n_embd;
GGML_ASSERT((size_t) n_outputs == out_ids.size());
// TODO: is there something more efficient which also minimizes swaps?
// selection sort, to minimize swaps (from https://en.wikipedia.org/wiki/Selection_sort)
for (int32_t i = 0; i < n_outputs - 1; ++i) {
int32_t j_min = i;
for (int32_t j = i + 1; j < n_outputs; ++j) {
if (out_ids[j] < out_ids[j_min]) {
j_min = j;
}
}
if (j_min == i) { continue; }
std::swap(out_ids[i], out_ids[j_min]);
if (logits_size > 0) {
for (uint32_t k = 0; k < n_vocab; k++) {
std::swap(logits[i*n_vocab + k], logits[j_min*n_vocab + k]);
}
}
if (embd_size > 0) {
for (uint32_t k = 0; k < n_embd; k++) {
std::swap(embd[i*n_embd + k], embd[j_min*n_embd + k]);
}
}
}
std::fill(output_ids.begin(), output_ids.end(), -1);
for (int32_t i = 0; i < n_outputs; ++i) {
output_ids[out_ids[i]] = i;
}
out_ids.clear();
}
}
// //
// graph // graph
// //
@@ -1617,7 +1233,7 @@ llm_graph_result_ptr llama_context::graph_build(
            /*.backend_cpu =*/ backend_cpu,
            /*.cvec =*/ &cvec,
            /*.loras =*/ &loras,
-           /*.memory =*/ kv_self.get(),
+           /*.memory =*/ memory.get(),
            /*.cross =*/ &cross,
            /*.n_outputs =*/ n_outputs,
            /*.cb =*/ graph_get_cb(),
@@ -2021,8 +1637,6 @@ size_t llama_context::state_write_data(llama_io_write_i & io) {
    {
        LLAMA_LOG_DEBUG("%s: - writing output ids\n", __func__);

-       output_reorder();
        const auto n_outputs = this->n_outputs;
        const auto & output_ids = this->output_ids;

@@ -2076,6 +1690,8 @@ size_t llama_context::state_write_data(llama_io_write_i & io) {
    }

    LLAMA_LOG_DEBUG("%s: - writing KV self\n", __func__);
+   llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
    kv_self->state_write(io);

    return io.n_bytes();

@@ -2160,6 +1776,8 @@ size_t llama_context::state_read_data(llama_io_read_i & io) {
    }

    LLAMA_LOG_DEBUG("%s: - reading KV self\n", __func__);
+   llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
    kv_self->state_read(io);

    return io.n_bytes();

@@ -2168,6 +1786,8 @@ size_t llama_context::state_seq_write_data(llama_io_write_i & io, llama_seq_id s
size_t llama_context::state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id) {
    GGML_UNUSED(seq_id);

+   llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
    kv_self->state_write(io, seq_id);

    return io.n_bytes();

@@ -2176,6 +1796,8 @@ size_t llama_context::state_seq_read_data(llama_io_read_i & io, llama_seq_id s
size_t llama_context::state_seq_read_data(llama_io_read_i & io, llama_seq_id seq_id) {
    GGML_UNUSED(seq_id);

+   llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
    kv_self->state_read(io, seq_id);

    return io.n_bytes();
@ -2531,7 +2153,7 @@ void llama_kv_cache_seq_cp(
llama_seq_id seq_id_dst, llama_seq_id seq_id_dst,
llama_pos p0, llama_pos p0,
llama_pos p1) { llama_pos p1) {
return llama_kv_self_seq_cp(ctx, seq_id_src, seq_id_dst, p0, p1); llama_kv_self_seq_cp(ctx, seq_id_src, seq_id_dst, p0, p1);
} }
void llama_kv_self_seq_cp( void llama_kv_self_seq_cp(
@ -2545,14 +2167,14 @@ void llama_kv_self_seq_cp(
return; return;
} }
return kv->seq_cp(seq_id_src, seq_id_dst, p0, p1); kv->seq_cp(seq_id_src, seq_id_dst, p0, p1);
} }
// deprecated // deprecated
void llama_kv_cache_seq_keep( void llama_kv_cache_seq_keep(
llama_context * ctx, llama_context * ctx,
llama_seq_id seq_id) { llama_seq_id seq_id) {
return llama_kv_self_seq_keep(ctx, seq_id); llama_kv_self_seq_keep(ctx, seq_id);
} }
void llama_kv_self_seq_keep(llama_context * ctx, llama_seq_id seq_id) { void llama_kv_self_seq_keep(llama_context * ctx, llama_seq_id seq_id) {
@ -2561,7 +2183,7 @@ void llama_kv_self_seq_keep(llama_context * ctx, llama_seq_id seq_id) {
return; return;
} }
return kv->seq_keep(seq_id); kv->seq_keep(seq_id);
} }
// deprecated // deprecated
@ -2571,7 +2193,7 @@ void llama_kv_cache_seq_add(
llama_pos p0, llama_pos p0,
llama_pos p1, llama_pos p1,
llama_pos delta) { llama_pos delta) {
return llama_kv_self_seq_add(ctx, seq_id, p0, p1, delta); llama_kv_self_seq_add(ctx, seq_id, p0, p1, delta);
} }
void llama_kv_self_seq_add( void llama_kv_self_seq_add(
@ -2585,7 +2207,7 @@ void llama_kv_self_seq_add(
return; return;
} }
return kv->seq_add(seq_id, p0, p1, delta); kv->seq_add(seq_id, p0, p1, delta);
} }
// deprecated // deprecated
@ -2595,7 +2217,7 @@ void llama_kv_cache_seq_div(
llama_pos p0, llama_pos p0,
llama_pos p1, llama_pos p1,
int d) { int d) {
return llama_kv_self_seq_div(ctx, seq_id, p0, p1, d); llama_kv_self_seq_div(ctx, seq_id, p0, p1, d);
} }
void llama_kv_self_seq_div( void llama_kv_self_seq_div(
@ -2609,7 +2231,7 @@ void llama_kv_self_seq_div(
return; return;
} }
return kv->seq_div(seq_id, p0, p1, d); kv->seq_div(seq_id, p0, p1, d);
} }
// deprecated // deprecated
@ -2628,7 +2250,7 @@ llama_pos llama_kv_self_seq_pos_max(llama_context * ctx, llama_seq_id seq_id) {
// deprecated // deprecated
void llama_kv_cache_defrag(llama_context * ctx) { void llama_kv_cache_defrag(llama_context * ctx) {
return llama_kv_self_defrag(ctx); llama_kv_self_defrag(ctx);
} }
void llama_kv_self_defrag(llama_context * ctx) { void llama_kv_self_defrag(llama_context * ctx) {
@@ -2637,7 +2259,8 @@ void llama_kv_self_defrag(llama_context * ctx) {
        return;
    }

-   return kv->defrag();
+   // force defrag
+   kv->defrag_sched(-1.0f);
}
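Taken together with the decode() change above, the scheduling contract is: a non-negative threshold only queues a defrag when measured fragmentation exceeds it, while a negative value unconditionally queues one; either way the work is applied lazily by the next update. A two-line summary of the call sites in this diff:

    kv_self->defrag_sched(cparams.defrag_thold); // decode(): defrag only if fragmentation exceeds the threshold
    kv_self->defrag_sched(-1.0f);                // llama_kv_self_defrag(): force a defrag on the next update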
// deprecated // deprecated


@ -28,6 +28,11 @@ struct llama_context {
void synchronize(); void synchronize();
const llama_model & get_model() const; const llama_model & get_model() const;
const llama_cparams & get_cparams() const;
ggml_backend_sched_t get_sched() const;
ggml_context * get_ctx_compute() const;
uint32_t n_ctx() const; uint32_t n_ctx() const;
uint32_t n_ctx_per_seq() const; uint32_t n_ctx_per_seq() const;
@ -137,49 +142,30 @@ private:
// Returns max number of outputs for which space was reserved. // Returns max number of outputs for which space was reserved.
int32_t output_reserve(int32_t n_outputs); int32_t output_reserve(int32_t n_outputs);
// make the outputs have the same order they had in the user-provided batch
// TODO: maybe remove this
void output_reorder();
// //
// graph // graph
// //
public:
int32_t graph_max_nodes() const; int32_t graph_max_nodes() const;
// zero-out inputs and create the ctx_compute for the compute graph // zero-out inputs and create the ctx_compute for the compute graph
ggml_cgraph * graph_init(); ggml_cgraph * graph_init();
llm_graph_result_ptr graph_build(
ggml_context * ctx,
ggml_cgraph * gf,
const llama_ubatch & ubatch,
llm_graph_type gtype);
// returns the result of ggml_backend_sched_graph_compute_async execution // returns the result of ggml_backend_sched_graph_compute_async execution
ggml_status graph_compute( ggml_status graph_compute(
ggml_cgraph * gf, ggml_cgraph * gf,
bool batched); bool batched);
private:
llm_graph_result_ptr graph_build(
ggml_context * ctx,
ggml_cgraph * gf,
const llama_ubatch & ubatch,
llm_graph_type gtype);
llm_graph_cb graph_get_cb() const; llm_graph_cb graph_get_cb() const;
// used by kv_self_update()
ggml_tensor * build_rope_shift(
ggml_context * ctx0,
ggml_tensor * cur,
ggml_tensor * shift,
ggml_tensor * factors,
float freq_base,
float freq_scale) const;
llm_graph_result_ptr build_kv_self_shift(
ggml_context * ctx0,
ggml_cgraph * gf) const;
llm_graph_result_ptr build_kv_self_defrag(
ggml_context * ctx0,
ggml_cgraph * gf) const;
// TODO: read/write lora adapters and cvec // TODO: read/write lora adapters and cvec
size_t state_write_data(llama_io_write_i & io); size_t state_write_data(llama_io_write_i & io);
size_t state_read_data (llama_io_read_i & io); size_t state_read_data (llama_io_read_i & io);
@ -196,11 +182,10 @@ private:
llama_cparams cparams; llama_cparams cparams;
llama_adapter_cvec cvec; llama_adapter_cvec cvec;
llama_adapter_loras loras; llama_adapter_loras loras;
llama_sbatch sbatch;
llama_cross cross; // TODO: tmp for handling cross-attention - need something better probably llama_cross cross; // TODO: tmp for handling cross-attention - need something better probably
std::unique_ptr<llama_kv_cache_unified> kv_self; std::unique_ptr<llama_memory_i> memory;
// TODO: remove // TODO: remove
bool logits_all = false; bool logits_all = false;


@@ -284,24 +284,7 @@ void llm_graph_input_s_copy::set_input(const llama_ubatch * ubatch) {
        // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n
        for (uint32_t i = 0; i < n_kv; ++i) {
-           const uint32_t cell_id = i + kv_self->head;
-           //////////////////////////////////////////////
-           // TODO: this should not mutate the KV cache !
-           llama_kv_cell & kv_cell = const_cast<class llama_kv_cache_unified *>(kv_self)->cells[i];
-           // prevent out-of-bound sources
-           if (kv_cell.src < 0 || (uint32_t) kv_cell.src >= kv_self->size) {
-               kv_cell.src = cell_id;
-           }
-           data[i] = kv_cell.src;
-           // TODO: do not mutate the KV cache
-           // ensure copy only happens once
-           if (kv_cell.src != (int32_t) cell_id) {
-               kv_cell.src = cell_id;
-           }
+           data[i] = kv_self->s_copy(i);
        }
    }
}
@@ -317,18 +300,7 @@ void llm_graph_input_s_mask::set_input(const llama_ubatch * ubatch) {
        // clear unused states
        for (int i = 0; i < n_kv; ++i) {
-           const uint32_t cell_id = i + kv_self->head;
-           //////////////////////////////////////////////
-           // TODO: this should not mutate the KV cache !
-           llama_kv_cell & kv_cell = const_cast<class llama_kv_cache_unified *>(kv_self)->cells[i];
-           data[i] = (float) (kv_cell.src >= 0);
-           // only clear once
-           if (kv_cell.src < 0) {
-               kv_cell.src = cell_id;
-           }
+           data[i] = kv_self->s_mask(i);
        }
    }
}
@@ -1105,7 +1077,7 @@ ggml_tensor * llm_graph_context::build_inp_cls() const {
}

ggml_tensor * llm_graph_context::build_inp_s_copy() const {
-   const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory);
+   const llama_kv_cache_recurrent * kv_self = static_cast<const llama_kv_cache_recurrent *>(memory);

    auto inp = std::make_unique<llm_graph_input_s_copy>(kv_self);

@@ -1122,7 +1094,7 @@ ggml_tensor * llm_graph_context::build_inp_s_copy() const {
}

ggml_tensor * llm_graph_context::build_inp_s_mask() const {
-   const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory);
+   const llama_kv_cache_recurrent * kv_self = static_cast<const llama_kv_cache_recurrent *>(memory);

    auto inp = std::make_unique<llm_graph_input_s_mask>(kv_self);
@@ -1436,8 +1408,6 @@ ggml_tensor * llm_graph_context::build_attn(
    // store to KV cache
    {
-       GGML_ASSERT(!kv_self->recurrent);
        const auto kv_head = kv_self->head;

        GGML_ASSERT(kv_self->size == n_ctx);
@ -1587,7 +1557,7 @@ ggml_tensor * llm_graph_context::build_copy_mask_state(
ggml_tensor * state_mask, ggml_tensor * state_mask,
int32_t n_state, int32_t n_state,
int32_t n_seqs) const { int32_t n_seqs) const {
const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory); const llama_kv_cache_recurrent * kv_self = static_cast<const llama_kv_cache_recurrent *>(memory);
const auto n_kv = kv_self->n; const auto n_kv = kv_self->n;
const auto kv_head = kv_self->head; const auto kv_head = kv_self->head;
@ -1619,7 +1589,7 @@ ggml_tensor * llm_graph_context::build_rwkv_token_shift_load(
ggml_tensor * state_mask, ggml_tensor * state_mask,
const llama_ubatch & ubatch, const llama_ubatch & ubatch,
int il) const { int il) const {
const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory); const llama_kv_cache_recurrent * kv_self = static_cast<const llama_kv_cache_recurrent *>(memory);
const auto token_shift_count = hparams.token_shift_count; const auto token_shift_count = hparams.token_shift_count;
@ -1640,7 +1610,7 @@ ggml_tensor * llm_graph_context::build_rwkv_token_shift_store(
ggml_tensor * token_shift, ggml_tensor * token_shift,
const llama_ubatch & ubatch, const llama_ubatch & ubatch,
int il) const { int il) const {
const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory); const llama_kv_cache_recurrent * kv_self = static_cast<const llama_kv_cache_recurrent *>(memory);
const auto token_shift_count = hparams.token_shift_count; const auto token_shift_count = hparams.token_shift_count;
const auto n_embd = hparams.n_embd; const auto n_embd = hparams.n_embd;


@ -19,6 +19,7 @@ struct llama_cparams;
class llama_memory_i; class llama_memory_i;
class llama_kv_cache_unified; class llama_kv_cache_unified;
class llama_kv_cache_recurrent;
// certain models (typically multi-modal) can produce different types of graphs // certain models (typically multi-modal) can produce different types of graphs
enum llm_graph_type { enum llm_graph_type {
@ -186,26 +187,26 @@ public:
class llm_graph_input_s_copy : public llm_graph_input_i { class llm_graph_input_s_copy : public llm_graph_input_i {
public: public:
llm_graph_input_s_copy(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {} llm_graph_input_s_copy(const llama_kv_cache_recurrent * kv_self) : kv_self(kv_self) {}
virtual ~llm_graph_input_s_copy() = default; virtual ~llm_graph_input_s_copy() = default;
void set_input(const llama_ubatch * ubatch) override; void set_input(const llama_ubatch * ubatch) override;
ggml_tensor * s_copy; // I32 [kv_size] ggml_tensor * s_copy; // I32 [kv_size]
const llama_kv_cache_unified * kv_self; const llama_kv_cache_recurrent * kv_self;
}; };
class llm_graph_input_s_mask : public llm_graph_input_i { class llm_graph_input_s_mask : public llm_graph_input_i {
public: public:
llm_graph_input_s_mask(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {} llm_graph_input_s_mask(const llama_kv_cache_recurrent * kv_self) : kv_self(kv_self) {}
virtual ~llm_graph_input_s_mask() = default; virtual ~llm_graph_input_s_mask() = default;
void set_input(const llama_ubatch * ubatch) override; void set_input(const llama_ubatch * ubatch) override;
ggml_tensor * s_mask; // F32 [1, n_kv] ggml_tensor * s_mask; // F32 [1, n_kv]
const llama_kv_cache_unified * kv_self; const llama_kv_cache_recurrent * kv_self;
}; };
class llm_graph_input_cross_embd : public llm_graph_input_i { class llm_graph_input_cross_embd : public llm_graph_input_i {
@ -350,8 +351,8 @@ struct llm_graph_params {
const llama_cparams & cparams; const llama_cparams & cparams;
const llama_ubatch & ubatch; const llama_ubatch & ubatch;
ggml_backend_sched * sched; ggml_backend_sched_t sched;
ggml_backend * backend_cpu; ggml_backend_t backend_cpu;
const llama_adapter_cvec * cvec; const llama_adapter_cvec * cvec;
const llama_adapter_loras * loras; const llama_adapter_loras * loras;
@ -402,9 +403,9 @@ struct llm_graph_context {
ggml_context * ctx0 = nullptr; ggml_context * ctx0 = nullptr;
ggml_backend_sched * sched; ggml_backend_sched_t sched;
ggml_backend * backend_cpu; // TODO: needed by build_attn_mha, figure out a way to remove? ggml_backend_t backend_cpu; // TODO: needed by build_attn_mha, figure out a way to remove?
const llama_adapter_cvec * cvec; const llama_adapter_cvec * cvec;
const llama_adapter_loras * loras; const llama_adapter_loras * loras;

File diff suppressed because it is too large.


@@ -2,32 +2,72 @@
#include "llama.h"
#include "llama-io.h"
+#include "llama-graph.h"
#include "llama-memory.h"

#include "ggml-cpp.h"

-#include <functional>
#include <set>
#include <vector>

struct llama_cparams;
struct llama_hparams;
struct llama_ubatch;
+struct llama_sbatch;
+struct llama_model;
+struct llama_context;

struct llama_kv_cache : public llama_memory_i {
-   using llama_memory_i::llama_memory_i;
+   virtual ~llama_kv_cache() = default;

-   virtual void restore() = 0; // call if batch processing fails - restores the cache state
-   virtual void commit() = 0;  // call after successful batch processing - clears any pending state
+   // call if batch processing fails - restores the cache state
+   virtual void restore() = 0;
+
+   // call after successful batch processing - clears any pending state
+   virtual void commit() = 0;
// process any pending defrag/shift/etc. operations
// optionally call once before processing a new batch
virtual bool update(llama_context & lctx) = 0;
// schedule a defrag if the fragmentation threshold is exceeded. otherwise, do nothing
virtual void defrag_sched(float thold) = 0;
// simulate full cache, used for allocating worst-case compute buffers
virtual void set_full() = 0;
//
// batch processing
//
virtual llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) = 0;
// different KV caches require different batch splitting strategies
virtual llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const = 0;
// find an empty slot of size "n_tokens" in the cache
virtual bool find_slot(const llama_ubatch & batch) = 0;
// getters
virtual int32_t get_n_tokens() const = 0; virtual int32_t get_n_tokens() const = 0;
virtual int32_t get_used_cells() const = 0; // TODO: remove, this is too-specific to the unified cache virtual int32_t get_used_cells() const = 0; // TODO: remove, this is too-specific to the unified cache
virtual llama_pos get_pos_max() const = 0;
virtual bool get_can_shift() const = 0; virtual bool get_can_shift() const = 0;
bool get_can_edit() const override { return get_can_shift(); } bool get_can_edit() const override { return get_can_shift(); }
//
// state write/read
//
virtual void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const = 0;
virtual void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) = 0;
}; };
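Putting the new virtuals together, the intended call order around one batch (compare llama_context::decode earlier in this diff) is roughly the following sketch; error handling and graph building are elided, and the local names (kv, lctx, batch, n_ubatch) are assumptions for illustration:

    llama_kv_cache * kv = static_cast<llama_kv_cache *>(memory.get());

    llama_kv_cache_guard guard(kv); // restores the cache if we bail out before commit()

    kv->update(lctx);               // optional: apply any pending shift/defrag before the new batch

    llama_sbatch sbatch = kv->sbatch_init(batch, /* logits_all */ false);

    while (sbatch.n_tokens > 0) {
        llama_ubatch ubatch = kv->ubatch_next(sbatch, n_ubatch, /* embd_pooled */ false);

        if (!kv->find_slot(ubatch)) {
            return 1; // no room in the cache
        }

        // ... build and compute the graph for this ubatch ...
    }

    guard.commit(); // keep the cells appended by find_slot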
//
// llama_kv_cache_guard
//
struct llama_kv_cache_guard { struct llama_kv_cache_guard {
llama_kv_cache_guard(llama_kv_cache * kv) : kv(kv) {} llama_kv_cache_guard(llama_kv_cache * kv) : kv(kv) {}
@ -43,10 +83,190 @@ private:
llama_kv_cache * kv; llama_kv_cache * kv;
}; };
struct llama_kv_cell { //
// llama_kv_cache_unified
//
// TODO: add notion of max sequences
class llama_kv_cache_unified : public llama_kv_cache {
public:
struct kv_cell {
llama_pos pos = -1; llama_pos pos = -1;
llama_pos delta = 0; llama_pos delta = 0;
int32_t src = -1; // used by recurrent state models to copy states
std::set<llama_seq_id> seq_id;
bool has_seq_id(const llama_seq_id & id) const {
return seq_id.find(id) != seq_id.end();
}
bool is_empty() const {
return seq_id.empty();
}
bool is_same_seq(const kv_cell & other) const {
return seq_id == other.seq_id;
}
};
static uint32_t get_padding(const llama_cparams & cparams);
llama_kv_cache_unified(
const llama_model & model,
ggml_type type_k,
ggml_type type_v,
bool v_trans,
bool offload,
uint32_t kv_size,
uint32_t padding);
~llama_kv_cache_unified() = default;
//
// llama_memory_i
//
void clear() override;
bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
void seq_keep(llama_seq_id seq_id) override;
void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
llama_pos seq_pos_max(llama_seq_id seq_id) const override;
//
// llama_kv_cache
//
void restore() override;
void commit() override;
bool update(llama_context & ctx) override;
void defrag_sched(float thold) override;
void set_full() override;
llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
// updates the cache head
// Note: On success, it's important that cache.head points
// to the first cell of the slot.
bool find_slot(const llama_ubatch & batch) override;
int32_t get_n_tokens() const override;
int32_t get_used_cells() const override;
// TODO: better data structures to reduce the cost of this operation
llama_pos get_pos_max() const override;
bool get_can_shift() const override;
// state write/load
void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
// Note: The value of head isn't only used to optimize searching
// for a free KV slot. llama_decode_impl also uses it, so it
// cannot be freely changed after a slot has been allocated.
uint32_t head = 0;
uint32_t size = 0;
uint32_t used = 0; // used cells (i.e. at least one seq_id)
// computed before each graph build
uint32_t n = 0;
std::vector<kv_cell> cells;
std::vector<ggml_tensor *> k_l; // per layer
std::vector<ggml_tensor *> v_l;
private:
const llama_model & model;
const llama_hparams & hparams;
bool has_shift = false;
bool do_defrag = false;
bool v_trans = true; // the value tensor is transposed
bool can_shift = false;
// required padding
uint32_t padding = 1;
ggml_type type_k = GGML_TYPE_F16;
ggml_type type_v = GGML_TYPE_F16;
std::vector<ggml_context_ptr> ctxs;
std::vector<ggml_backend_buffer_ptr> bufs;
// defrag
struct {
std::vector<uint32_t> ids;
} defrag_info;
// return true if cells have been moved
bool defrag_prepare(int32_t n_max_nodes);
// commit/restore cache
struct slot_range {
uint32_t c0 = 0; // note: these are cell indices, not sequence positions
uint32_t c1 = 0;
};
// pending cell updates that are not yet committed
struct {
std::vector<slot_range> ranges;
} pending;
// find how many cells are currently in use
uint32_t cell_max() const;
size_t total_size() const;
size_t size_k_bytes() const;
size_t size_v_bytes() const;
ggml_tensor * build_rope_shift(
const llama_cparams & cparams,
ggml_context * ctx,
ggml_tensor * cur,
ggml_tensor * shift,
ggml_tensor * factors,
float freq_base,
float freq_scale) const;
llm_graph_result_ptr build_graph_shift(
const llama_cparams & cparams,
ggml_context * ctx,
ggml_cgraph * gf) const;
llm_graph_result_ptr build_graph_defrag(
const llama_cparams & cparams,
ggml_context * ctx,
ggml_cgraph * gf) const;
void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
};
//
// llama_kv_cache_recurrent
//
class llama_kv_cache_recurrent : public llama_kv_cache {
public:
struct kv_cell {
llama_pos pos = -1;
int32_t src = -1; // used to copy states
int32_t tail = -1; int32_t tail = -1;
std::set<llama_seq_id> seq_id; std::set<llama_seq_id> seq_id;
@ -59,49 +279,25 @@ struct llama_kv_cell {
return seq_id.empty(); return seq_id.empty();
} }
bool is_same_seq(const llama_kv_cell & other) const { bool is_same_seq(const kv_cell & other) const {
return seq_id == other.seq_id; return seq_id == other.seq_id;
} }
}; };
// ring-buffer of cached KV data llama_kv_cache_recurrent(
// TODO: pimpl const llama_model & model,
// TODO: add notion of max sequences
class llama_kv_cache_unified : public llama_kv_cache {
public:
// can be used to query data from the model if needed
struct callbacks {
std::function<ggml_tensor * (uint32_t n_ctx_per_seq, int il)> get_rope_factors;
};
llama_kv_cache_unified(
const llama_hparams & hparams,
callbacks cbs);
virtual ~llama_kv_cache_unified() = default;
// TODO: become constructor
bool init(
const llama_model & model, // TODO: do not reference the model
const llama_cparams & cparams,
ggml_type type_k, ggml_type type_k,
ggml_type type_v, ggml_type type_v,
uint32_t kv_size, bool offload,
bool offload); uint32_t kv_size);
int32_t get_n_tokens() const override; ~llama_kv_cache_recurrent() = default;
int32_t get_used_cells() const override;
size_t total_size() const; //
// llama_memory_i
// TODO: better data structures to reduce the cost of this operation //
llama_pos pos_max() const;
void clear() override; void clear() override;
void defrag() override;
virtual void restore() override;
virtual void commit() override;
bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override; bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override; void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
@ -111,63 +307,41 @@ public:
llama_pos seq_pos_max(llama_seq_id seq_id) const override; llama_pos seq_pos_max(llama_seq_id seq_id) const override;
//
// llama_kv_cache
//
void restore() override;
void commit() override;
bool update(llama_context & lctx) override;
void defrag_sched(float thold) override;
void set_full() override;
llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
bool find_slot(const llama_ubatch & batch) override;
int32_t get_n_tokens() const override;
int32_t get_used_cells() const override;
// TODO: better data structures to reduce the cost of this operation
llama_pos get_pos_max() const override;
bool get_can_shift() const override; bool get_can_shift() const override;
// find an empty slot of size "n_tokens" in the cache // TODO: temporary methods - they are not really const as they do const_cast<>, fix this
// updates the cache head int32_t s_copy(int i) const;
// Note: On success, it's important that cache.head points float s_mask(int i) const;
// to the first cell of the slot.
bool find_slot(const llama_ubatch & batch);
// TODO: maybe not needed
uint32_t get_padding(const llama_cparams & cparams) const;
// find how many cells are currently in use
uint32_t cell_max() const;
size_t size_k_bytes() const;
size_t size_v_bytes() const;
// defrag
struct {
std::vector<uint32_t> ids;
} defrag_info;
// return true if cells have been moved
bool defrag_prepare(int32_t n_max_nodes);
// commit/restore cache
struct slot_range {
uint32_t c0 = 0; // note: these are cell indices, not sequence positions
uint32_t c1 = 0;
};
// pending cell updates that are not yet committed
struct {
std::vector<slot_range> ranges;
} pending;
// state write/load // state write/load
void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const; void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1); void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
// members
const llama_hparams & hparams;
callbacks cbs;
bool has_shift = false;
bool do_defrag = false;
// TODO: remove this and implement llama_kv_cache_recurrent instead
bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token
bool v_trans = true; // the value tensor is transposed
bool can_shift = false;
// Note: The value of head isn't only used to optimize searching // Note: The value of head isn't only used to optimize searching
// for a free KV slot. llama_decode_impl also uses it, so it // for a free KV slot. llama_decode_impl also uses it, so it
@ -179,18 +353,41 @@ public:
// computed before each graph build // computed before each graph build
uint32_t n = 0; uint32_t n = 0;
std::vector<llama_kv_cell> cells; std::vector<kv_cell> cells;
std::vector<ggml_tensor *> k_l; // per layer std::vector<ggml_tensor *> k_l; // per layer
std::vector<ggml_tensor *> v_l; std::vector<ggml_tensor *> v_l;
private: private:
//const llama_model & model;
const llama_hparams & hparams;
// commit/restore cache
// TODO: rework for recurrent cache
struct slot_range {
uint32_t c0 = 0; // note: these are cell indices, not sequence positions
uint32_t c1 = 0;
};
// pending cell updates that are not yet committed
struct {
std::vector<slot_range> ranges;
} pending;
ggml_type type_k = GGML_TYPE_F16; ggml_type type_k = GGML_TYPE_F16;
ggml_type type_v = GGML_TYPE_F16; ggml_type type_v = GGML_TYPE_F16;
std::vector<ggml_context_ptr> ctxs; std::vector<ggml_context_ptr> ctxs;
std::vector<ggml_backend_buffer_ptr> bufs; std::vector<ggml_backend_buffer_ptr> bufs;
// find how many cells are currently in use
uint32_t cell_max() const;
size_t total_size() const;
size_t size_k_bytes() const;
size_t size_v_bytes() const;
void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const; void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const; void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
@ -198,11 +395,6 @@ private:
bool state_read_data(llama_io_read_i & io, uint32_t cell_count); bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
}; };
// TODO: temporary reusing llama_kv_cache_unified -- implement recurrent cache and simplify llama_kv_cache_unified
//class llama_kv_cache_recurrent : public llama_kv_cache_unified {
//public:
// using llama_kv_cache_unified::llama_kv_cache_unified;
//};
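With the recurrent cache now a real class, constructing it would look roughly like this (argument order from the declaration above; the F32 state types and the one-cell-per-sequence size follow the Mamba-specific code removed from llama_context's constructor earlier in this diff):

    llama_kv_cache_recurrent kv(
        model,
        GGML_TYPE_F32,                            // type_k: ggml_ssm_conv requires F32 conv states
        GGML_TYPE_F32,                            // type_v: ggml_ssm_scan requires F32 ssm states
        cparams.offload_kqv,                      // offload
        std::max((uint32_t) 1, params.n_seq_max)  // kv_size: at least one cell per tracked sequence
    );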
// //
// kv cache view // kv cache view


@ -2,12 +2,22 @@
#include "llama.h" #include "llama.h"
struct llama_memory_params {
// kv cache
ggml_type type_k;
ggml_type type_v;
// parameters for other types of memory
// ...
};
// general concept of LLM memory // general concept of LLM memory
// the KV cache is a type of LLM memory, but there can be other types // the KV cache is a type of LLM memory, but there can be other types
class llama_memory_i { class llama_memory_i {
public: public:
virtual ~llama_memory_i() = default;
virtual void clear() = 0; virtual void clear() = 0;
virtual void defrag() = 0;
virtual bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) = 0; virtual bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) = 0;
virtual void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) = 0; virtual void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) = 0;


@ -778,6 +778,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
// fall through // fall through
case LLM_ARCH_QWEN2: case LLM_ARCH_QWEN2:
{ {
ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
switch (hparams.n_layer) { switch (hparams.n_layer) {
case 24: type = hparams.n_embd == 1024 ? LLM_TYPE_0_5B : LLM_TYPE_1B; break; case 24: type = hparams.n_embd == 1024 ? LLM_TYPE_0_5B : LLM_TYPE_1B; break;
@ -4544,6 +4545,19 @@ const ggml_tensor * llama_model::get_tensor(const char * name) const {
return it->second; return it->second;
} }
ggml_tensor * llama_model::get_rope_factors(uint32_t n_ctx_per_seq, int il) const {
// choose long/short freq factors based on the context size
if (layers[il].rope_freqs != nullptr) {
return layers[il].rope_freqs;
}
if (n_ctx_per_seq > hparams.n_ctx_orig_yarn) {
return layers[il].rope_long;
}
return layers[il].rope_short;
}
struct llm_build_llama : public llm_graph_context { struct llm_build_llama : public llm_graph_context {
llm_build_llama(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { llm_build_llama(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_head = hparams.n_embd_head_v;
@@ -4587,7 +4601,7 @@ struct llm_build_llama : public llm_graph_context {
#if defined(GGML_USE_CLBLAST)
                struct ggml_tensor * rope_factors = nullptr; //clblast does not work with rope_factors
#else
-               ggml_tensor * rope_factors = static_cast<const llama_kv_cache_unified *>(memory)->cbs.get_rope_factors(n_ctx_per_seq, il);
+               ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il);
#endif

                // compute Q and K and RoPE them
@ -4813,7 +4827,7 @@ struct llm_build_deci : public llm_graph_context {
} else if (n_head > 0) { } else if (n_head > 0) {
// self-attention // self-attention
// rope freq factors for llama3; may return nullptr for llama2 and other models // rope freq factors for llama3; may return nullptr for llama2 and other models
ggml_tensor * rope_factors = static_cast<const llama_kv_cache_unified *>(memory)->cbs.get_rope_factors(n_ctx_per_seq, il); ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il);
// compute Q and K and RoPE them // compute Q and K and RoPE them
ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
@ -7295,7 +7309,7 @@ struct llm_build_phi3 : public llm_graph_context {
// self-attention // self-attention
{ {
// rope freq factors for 128k context // rope freq factors for 128k context
ggml_tensor * rope_factors = static_cast<const llama_kv_cache_unified *>(memory)->cbs.get_rope_factors(n_ctx_per_seq, il); ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il);
ggml_tensor* attn_norm_output = build_norm(inpL, ggml_tensor* attn_norm_output = build_norm(inpL,
model.layers[il].attn_norm, model.layers[il].attn_norm,
@ -8047,7 +8061,7 @@ struct llm_build_minicpm3 : public llm_graph_context {
for (int il = 0; il < n_layer; ++il) { for (int il = 0; il < n_layer; ++il) {
ggml_tensor * inpSA = inpL; ggml_tensor * inpSA = inpL;
ggml_tensor * rope_factors = static_cast<const llama_kv_cache_unified *>(memory)->cbs.get_rope_factors(n_ctx_per_seq, il); ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il);
// norm // norm
cur = build_norm(inpL, cur = build_norm(inpL,
@ -8814,7 +8828,7 @@ struct llm_build_mamba : public llm_graph_context {
ggml_tensor * state_mask, ggml_tensor * state_mask,
const llama_ubatch & ubatch, const llama_ubatch & ubatch,
int il) const { int il) const {
const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory); const llama_kv_cache_recurrent * kv_self = static_cast<const llama_kv_cache_recurrent *>(memory);
const auto kv_head = kv_self->head; const auto kv_head = kv_self->head;
@@ -9115,7 +9129,7 @@ struct llm_build_cohere2 : public llm_graph_context {
             // self-attention
             {
                 // rope freq factors for 128k context
-                ggml_tensor * rope_factors = static_cast<const llama_kv_cache_unified *>(memory)->cbs.get_rope_factors(n_ctx_per_seq, il);
+                ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il);
                 // compute Q and K and RoPE them
                 ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
@@ -10053,7 +10067,7 @@ struct llm_build_deepseek : public llm_graph_context {
             // self-attention
             {
                 // rope freq factors for llama3; may return nullptr for llama2 and other models
-                ggml_tensor * rope_factors = static_cast<const llama_kv_cache_unified *>(memory)->cbs.get_rope_factors(n_ctx_per_seq, il);
+                ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il);
                 // compute Q and K and RoPE them
                 ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
@@ -11417,7 +11431,7 @@ struct llm_build_exaone : public llm_graph_context {
             // self-attention
             {
                 // rope freq factors for llama3; may return nullptr for llama2 and other models
-                ggml_tensor * rope_factors = static_cast<const llama_kv_cache_unified *>(memory)->cbs.get_rope_factors(n_ctx_per_seq, il);
+                ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il);
                 // compute Q and K and RoPE them
                 ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
@@ -11562,7 +11576,7 @@ struct llm_build_rwkv6_base : public llm_graph_context {
             ggml_tensor * state_mask,
             const llama_ubatch & ubatch,
             int il) const {
-        const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory);
+        const llama_kv_cache_recurrent * kv_self = static_cast<const llama_kv_cache_recurrent *>(memory);
         const auto n_tokens = ubatch.n_tokens;
         const auto n_seqs = ubatch.n_seqs;
@@ -11958,7 +11972,7 @@ struct llm_build_rwkv7_base : public llm_graph_context {
             ggml_tensor *& first_layer_value,
             const llama_ubatch & ubatch,
             int il) const {
-        const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory);
+        const llama_kv_cache_recurrent * kv_self = static_cast<const llama_kv_cache_recurrent *>(memory);
         const auto n_tokens = ubatch.n_tokens;
         const auto n_seqs = ubatch.n_seqs;
@@ -12798,7 +12812,7 @@ struct llm_build_bailingmoe : public llm_graph_context {
             // self-attention
             {
                 // rope freq factors for llama3; may return nullptr for llama2 and other models
-                ggml_tensor * rope_factors = static_cast<const llama_kv_cache_unified *>(memory)->cbs.get_rope_factors(n_ctx_per_seq, il);
+                ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il);
                 // compute Q and K and RoPE them
                 ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
@@ -12918,7 +12932,7 @@ struct llm_build_bailingmoe : public llm_graph_context {
     }
 };
-llama_memory_i * llama_model::create_memory() const {
+llama_memory_i * llama_model::create_memory(const llama_memory_params & params, llama_cparams & cparams) const {
     llama_memory_i * res;
     switch (arch) {
@@ -12928,26 +12942,29 @@ llama_memory_i * llama_model::create_memory() const {
         case LLM_ARCH_RWKV7:
         case LLM_ARCH_ARWKV7:
             {
-                res = new llama_kv_cache_unified(hparams, {
-                    /*.get_rope_factors =*/ nullptr
-                });
+                res = new llama_kv_cache_recurrent(
+                        *this,
+                        GGML_TYPE_F32,
+                        GGML_TYPE_F32,
+                        cparams.offload_kqv,
+                        std::max((uint32_t) 1, cparams.n_seq_max));
            } break;
        default:
            {
-                res = new llama_kv_cache_unified(hparams, {
-                    /*.get_rope_factors =*/ [this](uint32_t n_ctx_per_seq, int il) {
-                        // choose long/short freq factors based on the context size
-                        if (layers[il].rope_freqs != nullptr) {
-                            return layers[il].rope_freqs;
-                        }
-                        if (n_ctx_per_seq > hparams.n_ctx_orig_yarn) {
-                            return layers[il].rope_long;
-                        }
-                        return layers[il].rope_short;
-                    }
-                });
+                const auto padding = llama_kv_cache_unified::get_padding(cparams);
+                cparams.n_ctx = GGML_PAD(cparams.n_ctx, padding);
+                LLAMA_LOG_DEBUG("%s: n_ctx = %u (padded)\n", __func__, cparams.n_ctx);
+                res = new llama_kv_cache_unified(
+                        *this,
+                        params.type_k,
+                        params.type_v,
+                        !cparams.flash_attn,
+                        cparams.offload_kqv,
+                        cparams.n_ctx,
+                        padding);
            }
    }
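create_memory() now takes the memory parameters plus a mutable reference to the context parameters: recurrent architectures get a llama_kv_cache_recurrent sized by n_seq_max, everything else gets a llama_kv_cache_unified, and in the unified branch cparams.n_ctx is rounded up to the cache's padding before construction, which is why the header below notes that the call can mutate cparams. A rough standalone sketch of that padding step follows, with simplified stand-ins for llama_cparams and get_padding(); the 32/256 granularities are assumptions for illustration, not values guaranteed by the library.

// Rough sketch of the padding step in the new create_memory() default branch.
#include <cstdint>
#include <cstdio>

// same effect as ggml's GGML_PAD(x, n): round x up to a multiple of n
#define PAD_UP(x, n) (((x) + (n) - 1) / (n) * (n))

struct cparams_t {
    uint32_t n_ctx      = 4097;   // user-requested context size
    bool     flash_attn = false;
};

// stand-in for llama_kv_cache_unified::get_padding(): a plausible rule for illustration --
// flash attention wants larger aligned blocks, otherwise a small granularity suffices
static uint32_t get_padding(const cparams_t & cp) {
    return cp.flash_attn ? 256u : 32u;
}

static void create_memory(cparams_t & cp) {
    const uint32_t padding = get_padding(cp);
    cp.n_ctx = PAD_UP(cp.n_ctx, padding);           // the in-place mutation the header comment warns about
    printf("n_ctx padded to %u (padding = %u)\n", cp.n_ctx, padding);
    // ...the real code then constructs llama_kv_cache_unified with type_k/type_v from
    // llama_memory_params, !flash_attn (V transposition), offload_kqv, n_ctx and padding.
}

int main() {
    cparams_t cp;
    create_memory(cp);                // 4097 -> 4128 with padding 32
    cp = {};
    cp.flash_attn = true;
    create_memory(cp);                // 4097 -> 4352 with padding 256
    return 0;
}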


@@ -395,8 +395,11 @@ struct llama_model {
     const struct ggml_tensor * get_tensor(const char * name) const;
+    ggml_tensor * get_rope_factors(uint32_t n_ctx_per_seq, int il) const;
+    // note: can mutate `cparams`
     // TODO: move this to new llm_arch_model_i interface
-    llama_memory_i * create_memory() const; // TODO: params
+    llama_memory_i * create_memory(const llama_memory_params & params, llama_cparams & cparams) const;
     // TODO: move this to new llm_arch_model_i interface
     llm_graph_result_ptr build_graph(