diff --git a/common/arg.cpp b/common/arg.cpp index 0c5f3556e..d4ddf28f9 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -735,23 +735,28 @@ static void common_params_print_completion(common_params_context & ctx_arg) { "llama-completion", "llama-convert-llama2c-to-ggml", "llama-cvector-generator", + "llama-debug", + "llama-diffusion-cli", "llama-embedding", "llama-eval-callback", "llama-export-lora", + "llama-finetune", + "llama-fit-params", + "llama-gemma3-cli", "llama-gen-docs", "llama-gguf", "llama-gguf-hash", "llama-gguf-split", - "llama-gritlm", + "llama-idle", "llama-imatrix", - "llama-infill", - "llama-mtmd-cli", - "llama-llava-clip-quantize-cli", + "llama-llava-cli", "llama-lookahead", "llama-lookup", "llama-lookup-create", "llama-lookup-merge", "llama-lookup-stats", + "llama-minicpmv-cli", + "llama-mtmd-cli", "llama-parallel", "llama-passkey", "llama-perplexity", @@ -2669,7 +2674,8 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & params, const std::string & value) { params.out_file = value; } - ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_CVECTOR_GENERATOR, LLAMA_EXAMPLE_EXPORT_LORA, LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_FINETUNE, LLAMA_EXAMPLE_RESULTS})); + ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_CVECTOR_GENERATOR, LLAMA_EXAMPLE_EXPORT_LORA, LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_FINETUNE, + LLAMA_EXAMPLE_RESULTS, LLAMA_EXAMPLE_EXPORT_GRAPH_OPS})); add_opt(common_arg( {"-ofreq", "--output-frequency"}, "N", string_format("output the imatrix every N iterations (default: %d)", params.n_out_freq), diff --git a/common/chat.cpp b/common/chat.cpp index f4639ac46..794676962 100644 --- a/common/chat.cpp +++ b/common/chat.cpp @@ -1369,6 +1369,77 @@ static common_chat_params common_chat_params_init_lfm2(const common_chat_templat return data; } +static common_chat_params common_chat_params_init_gigachat_v3( + const common_chat_template & tmpl, + const autoparser::templates_params & inputs) { + + common_chat_params data; + + data.prompt = common_chat_template_direct_apply(tmpl, inputs); + data.format = COMMON_CHAT_FORMAT_PEG_NATIVE; + data.supports_thinking = false; + data.preserved_tokens = { + "<|message_sep|>\n\n", + "<|role_sep|>\n", + }; + + auto has_tools = inputs.tools.is_array() && !inputs.tools.empty(); + auto include_grammar = has_tools && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE; + auto tool_call_start_prefix = "<|message_sep|>\n\nfunction call<|role_sep|>\n"; + + auto parser = build_chat_peg_parser([&](common_chat_peg_builder & p) { + if (has_tools && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE) { + // Build a choice of all available tools + auto tool_choice = p.choice(); + for (const auto & tool : inputs.tools) { + const auto & function = tool.at("function"); + std::string name = function.at("name"); + const auto & schema = function.at("parameters"); + + auto tool_name = p.json_member("name", "\"" + p.tool_name(p.literal(name)) + "\""); + auto tool_args = p.json_member("arguments", p.tool_args(p.schema(p.json(), "tool-" + name + "-schema", schema))); + + auto tool_open = p.tool_open(p.literal("{") << tool_name); + + tool_choice |= p.rule("tool-" + name, tool_open << "," << tool_args << "}"); + } + + // Define the tool call structure + auto min_calls = inputs.tool_choice == COMMON_CHAT_TOOL_CHOICE_REQUIRED ? 
1 : 0; + auto max_calls = 1; // parallel toolcalls are not supported + auto tool_call = p.rule("tool-call", p.literal(tool_call_start_prefix) + tool_choice); + auto tool_calls = p.trigger_rule("tool-call-root", p.repeat(tool_call, /* min = */ min_calls, /* max = */ max_calls)); + + return p.content(p.until("<|message_sep|>\n\n")) << tool_calls; + } + + // Content only parser + include_grammar = false; + return p.content(p.rest()); + + }); + + data.parser = parser.save(); + + if (include_grammar) { + data.grammar_lazy = has_tools && inputs.tool_choice == COMMON_CHAT_TOOL_CHOICE_AUTO; + + data.grammar = build_grammar([&](const common_grammar_builder & builder) { + foreach_function(inputs.tools, [&](const json & tool) { + const auto & function = tool.at("function"); + auto schema = function.at("parameters"); + builder.resolve_refs(schema); + }); + parser.build_grammar(builder, data.grammar_lazy); + }); + + data.grammar_triggers = { + {COMMON_GRAMMAR_TRIGGER_TYPE_WORD, tool_call_start_prefix} + }; + } + return data; +} + namespace workaround { static void map_developer_role_to_system(json & messages) { @@ -1540,6 +1611,15 @@ static common_chat_params common_chat_templates_apply_jinja(const struct common_ return common_chat_params_init_lfm2(tmpl, params); } + // GigaChatV3 format detection + if (src.find("<|role_sep|>") != std::string::npos && + src.find("<|message_sep|>") != std::string::npos && + src.find("<|function_call|>") == std::string::npos + ) { + LOG_DBG("Using specialized template: GigaChatV3\n"); + return common_chat_params_init_gigachat_v3(tmpl, params); + } + try { LOG_DBG("Using differential autoparser\n"); struct autoparser::autoparser autoparser; diff --git a/common/common.h b/common/common.h index 7772ed63a..7f0252663 100644 --- a/common/common.h +++ b/common/common.h @@ -102,6 +102,7 @@ enum llama_example { LLAMA_EXAMPLE_FINETUNE, LLAMA_EXAMPLE_FIT_PARAMS, LLAMA_EXAMPLE_RESULTS, + LLAMA_EXAMPLE_EXPORT_GRAPH_OPS, LLAMA_EXAMPLE_COUNT, }; @@ -923,7 +924,7 @@ const char * const LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count"; // MoE utils // -const char * const LLM_FFN_EXPS_REGEX = "\\.ffn_(up|down|gate)_(ch|)exps"; +const char * const LLM_FFN_EXPS_REGEX = "\\.ffn_(up|down|gate|gate_up)_(ch|)exps"; inline std::string llm_ffn_exps_block_regex(int idx) { return string_format("blk\\.%d%s", idx, LLM_FFN_EXPS_REGEX); diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 4a4aac41d..eec0ea14e 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -144,6 +144,7 @@ class ModelBase: self.metadata_override = metadata_override self.model_name = model_name self.dir_model_card = dir_model # overridden in convert_lora_to_gguf.py + self._is_nvfp4 = False # Apply heuristics to figure out typical tensor encoding based on first tensor's dtype # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie. 
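For reference before the repack code below: ggml's NVFP4 layout stores each 64-element super-block as 4 UE4M3 sub-block scales followed by 32 packed E2M1 nibbles (36 bytes). A minimal NumPy sketch of the inverse mapping, handy for spot-checking converted tensors; the KVALUES table mirrors ggml's kvalues_mxfp4 convention, and all names here are illustrative, not part of the patch:

    import numpy as np

    # E2M1 code -> 2*value, mirroring ggml's kvalues_mxfp4 table
    KVALUES = np.array([0, 1, 2, 3, 4, 6, 8, 12,
                        0, -1, -2, -3, -4, -6, -8, -12], dtype=np.float32)

    def ue4m3_to_fp32(x: int) -> float:
        # unsigned E4M3, bias 7; halved to cancel the doubled KVALUES
        if x in (0, 0x7F):
            return 0.0
        exp, man = (x >> 3) & 0xF, x & 0x7
        raw = man * 2.0**-9 if exp == 0 else (1.0 + man / 8.0) * 2.0**(exp - 7)
        return raw * 0.5

    def dequant_nvfp4_row(raw: np.ndarray) -> np.ndarray:
        # raw: uint8, 36 bytes per super-block; the per-tensor scale2
        # factor is excluded (it is applied at inference via ggml_mul)
        blocks = raw.reshape(-1, 36)
        d = np.vectorize(ue4m3_to_fp32)(blocks[:, :4]).astype(np.float32)
        qs = blocks[:, 4:]
        lo = KVALUES[qs & 0x0F].reshape(-1, 4, 8)  # elements 0..7 of each sub-block
        hi = KVALUES[qs >> 4].reshape(-1, 4, 8)    # elements 8..15
        return (np.concatenate([lo, hi], -1) * d[:, :, None]).reshape(-1)

Under these assumptions, dequantizing a row produced by _nvfp4_pack and comparing against the ModelOpt reference values (times weight_scale_2, when present) gives a quick conversion sanity check.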
@@ -271,6 +272,9 @@ class ModelBase: return tensors def dequant_model(self): + if self._is_nvfp4: + return # NVFP4 weights are repacked in _generate_nvfp4_tensors + tensors_to_remove: list[str] = [] new_tensors: dict[str, Callable[[], Tensor]] = {} @@ -516,6 +520,13 @@ class ModelBase: raise NotImplementedError("set_gguf_parameters() must be implemented in subclasses") def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + # skip NVFP4 auxiliary tensors (handled in _generate_nvfp4_tensors) + if self._is_nvfp4: + if name.endswith((".weight_scale", ".weight_scale_2", ".input_scale", ".k_scale", ".v_scale")): + return [] + if name.endswith(".weight") and name.replace(".weight", ".weight_scale") in self.model_tensors: + return [] + new_name = self.map_tensor_name(name) # Handle gate/up expert tensor fusion if enabled @@ -551,9 +562,135 @@ class ModelBase: def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: return () + @staticmethod + def _nvfp4_pack(weight: Tensor, scale: Tensor) -> tuple[np.ndarray, list[int]]: + """Repack NVFP4 ModelOpt tensors into ggml super-block layout. + Preserves original E4M3 scale bits as UE4M3 (strip sign bit). + The per-tensor scale2 factor is stored as a separate tensor and applied at inference time via ggml_mul(). + Returns (raw_data, logical_shape).""" + + out_features = weight.shape[0] + n_blocks = scale.shape[1] + + # Unpack ModelOpt nibble-packed weights + w = weight.reshape(out_features, n_blocks, 8) + vals = torch.stack([w & 0x0F, w >> 4], dim=-1).reshape(out_features, n_blocks, 16) + + # Preserve original E4M3 scale bits as UE4M3 (strip sign bit) + d_ue = scale.view(torch.uint8).numpy().reshape(out_features, n_blocks) & 0x7F + qs = (vals[:, :, :8] | (vals[:, :, 8:] << 4)).to(torch.uint8).numpy() + + # Pack into super-blocks: [4 UE4M3 scales, 32 qs bytes] = 36 bytes per 64 elements + n_super = n_blocks // 4 + d_grouped = d_ue.reshape(out_features, n_super, 4) + qs_grouped = qs.reshape(out_features, n_super, 4, 8).reshape(out_features, n_super, 32) + raw = np.concatenate([d_grouped, qs_grouped], axis=-1).reshape(out_features, n_super * 36) + return raw, [out_features, n_super * 64] + + @staticmethod + def _nvfp4_scale2_is_trivial(scale2: Tensor) -> bool: + return scale2.numel() <= 1 and abs(float(scale2.float().sum()) - 1.0) < 1e-6 + + def _repack_nvfp4(self, new_name: str, weight: Tensor, scale: Tensor, scale2: Tensor): + raw, shape = self._nvfp4_pack(weight, scale) + logger.info(f"Repacked {new_name} with shape {shape} and quantization NVFP4") + self.gguf_writer.add_tensor(new_name, raw, raw_dtype=gguf.GGMLQuantizationType.NVFP4) + + # Emit per-tensor scale2 as a separate F32 tensor when non-trivial + if not self._nvfp4_scale2_is_trivial(scale2): + scale2_f32 = scale2.float().numpy().flatten() + scale_name = new_name.replace(".weight", ".scale") + logger.info(f" + {scale_name} (per-tensor NVFP4 scale2, shape [{scale2_f32.size}])") + self.gguf_writer.add_tensor(scale_name, scale2_f32) + + def _generate_nvfp4_tensors(self): + # Per-layer expert merging to avoid holding all experts in memory + expert_blocks: dict[tuple[int, str], list[tuple[int, np.ndarray]]] = {} + expert_scales: dict[tuple[int, str], list[tuple[int, float]]] = {} + expert_shapes: dict[tuple[int, str], list[int]] = {} + n_experts = self.find_hparam(["num_local_experts", "num_experts"], optional=True) or 0 + + for name in list(self.model_tensors.keys()): + if not name.endswith(".weight"): + continue + scale_name = 
name.replace(".weight", ".weight_scale") + scale2_name = name.replace(".weight", ".weight_scale_2") + if scale_name not in self.model_tensors: + continue + # Force eager materialization of lazy tensors + weight = LazyTorchTensor.to_eager(self.model_tensors[name]()) + scale = LazyTorchTensor.to_eager(self.model_tensors[scale_name]()) + scale2 = LazyTorchTensor.to_eager(self.model_tensors.get(scale2_name, lambda: torch.tensor(1.0))()) + + # Check if this is a per-expert tensor + m = re.search(r'\.experts\.(\d+)\.(gate_proj|up_proj|down_proj)\.weight$', name) + if m: + expert_id = int(m.group(1)) + proj_type = m.group(2) + bid_m = re.search(r'\.layers\.(\d+)\.', name) + bid = int(bid_m.group(1)) if bid_m else 0 + key = (bid, proj_type) + + raw, shape = self._nvfp4_pack(weight, scale) + + if key not in expert_blocks: + expert_blocks[key] = [] + expert_scales[key] = [] + expert_shapes[key] = shape + expert_blocks[key].append((expert_id, raw.copy())) + # Collect per-expert scale2 (scalar per expert) + expert_scales[key].append((expert_id, float(scale2.float().sum()))) + + # Flush when all experts for this (layer, proj) are collected + if n_experts > 0 and len(expert_blocks[key]) >= n_experts: + self._flush_nvfp4_experts(key, expert_blocks, expert_scales, expert_shapes, bid, proj_type) + else: + new_name = self.map_tensor_name(name) + self._repack_nvfp4(new_name, weight, scale, scale2) + + # Flush any remaining experts (fallback if n_experts was unknown) + for (bid, proj_type) in list(expert_blocks.keys()): + self._flush_nvfp4_experts((bid, proj_type), expert_blocks, expert_scales, expert_shapes, bid, proj_type) + + def _flush_nvfp4_experts(self, key, expert_blocks, expert_scales, expert_shapes, bid, proj_type): + experts = expert_blocks.pop(key) + scales = expert_scales.pop(key) + shape = expert_shapes.pop(key) + + experts.sort(key=lambda x: x[0]) + merged = np.stack([e[1] for e in experts], axis=0) + merged_name = f"model.layers.{bid}.mlp.experts.{proj_type}.weight" + new_name = self.map_tensor_name(merged_name) + logger.info(f"Repacked {new_name} with shape [{len(experts)}, {shape[0]}, {shape[1]}] and quantization NVFP4") + self.gguf_writer.add_tensor(new_name, merged, raw_dtype=gguf.GGMLQuantizationType.NVFP4) + + # Emit per-expert scale2 tensor if any expert has non-trivial scale2 + scales.sort(key=lambda x: x[0]) + scale_vals = np.array([s[1] for s in scales], dtype=np.float32) + if not np.allclose(scale_vals, 1.0, atol=1e-6): + scale_name = new_name.replace(".weight", ".scale") + logger.info(f" + {scale_name} (per-expert NVFP4 scale2, shape [{len(scales)}])") + self.gguf_writer.add_tensor(scale_name, scale_vals) + + del experts, merged + def prepare_tensors(self): + # detect NVFP4 quantization (ModelOpt format) + quant_algo = (self.hparams.get("quantization_config") or {}).get("quant_algo") + quant_config_file = self.dir_model / "hf_quant_config.json" + + if not quant_algo and quant_config_file.is_file(): + with open(quant_config_file, "r", encoding="utf-8") as f: + quant_algo = (json.load(f).get("quantization") or {}).get("quant_algo") + + self._is_nvfp4 = quant_algo == "NVFP4" + self.dequant_model() + # NVFP4 weights are repacked and written directly to gguf_writer + if self._is_nvfp4: + self._generate_nvfp4_tensors() + # Handle empty tensor_map for models with block_count=0 (like MobileNetV5) if self.tensor_map.mapping: max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,") @@ -2057,6 +2194,8 @@ class GPTNeoXModel(TextModel): def modify_tensors(self, 
data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) + assert n_head is not None + assert n_embed is not None if re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.weight", name): # Map bloom-style qkv_linear to gpt-style qkv_linear @@ -2094,6 +2233,8 @@ class BloomModel(TextModel): def set_gguf_parameters(self): n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) + assert n_head is not None + assert n_embed is not None self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed)) self.gguf_writer.add_embedding_length(n_embed) self.gguf_writer.add_feed_forward_length(4 * n_embed) @@ -2106,6 +2247,8 @@ class BloomModel(TextModel): def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) + assert n_head is not None + assert n_embed is not None name = re.sub(r'transformer\.', '', name) @@ -3716,6 +3859,7 @@ class LLaDAModel(TextModel): if (rope_dim := hparams.get("head_dim")) is None: n_heads = hparams.get("num_attention_heads", hparams.get("n_heads")) + assert n_heads is not None rope_dim = hparams.get("hidden_size", hparams.get("d_model")) // n_heads self.gguf_writer.add_rope_dimension_count(rope_dim) @@ -3747,6 +3891,7 @@ class LLaDAModel(TextModel): def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: n_head = self.hparams.get("num_attention_heads", self.hparams.get("n_heads")) + assert n_head is not None n_kv_head = self.hparams.get("num_key_value_heads", self.hparams.get("n_kv_heads")) if self.undo_permute: @@ -4303,6 +4448,14 @@ class Qwen2MoeModel(TextModel): # process the experts separately name = name.replace("language_model.", "") # InternVL + # NVFP4 expert weights are handled in _generate_nvfp4_tensors + if self._is_nvfp4 and "experts" in name: + if name.endswith((".weight", ".weight_scale", ".weight_scale_2", ".input_scale")): + if name.endswith(".weight") and name.replace(".weight", ".weight_scale") in self.model_tensors: + return + if not name.endswith(".weight"): + return + # handle aggregated expert tensors # GGUF stores dimensions reversed from PyTorch, so: # PyTorch (A,B,C) -> GGUF writes [C,B,A] -> GGML reads ne={C,B,A} @@ -4917,7 +5070,7 @@ class Phi2Model(TextModel): self.gguf_writer.add_add_bos_token(False) -@ModelBase.register("Phi3ForCausalLM") +@ModelBase.register("Phi3ForCausalLM", "Phi4ForCausalLMV") class Phi3MiniModel(TextModel): model_arch = gguf.MODEL_ARCH.PHI3 @@ -5092,6 +5245,129 @@ class Phi3MiniModel(TextModel): yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32)) yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32)) + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + if name.startswith(("model.vision_tower.", "vision_tower.", "model.mm_projector.", "mm_projector.")): + return + + yield from super().modify_tensors(data_torch, name, bid) + + +@ModelBase.register("Phi4ForCausalLMV") +class Phi4VisionMmprojModel(MmprojModel): + def 
__init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + assert self.hparams_vision is not None + + self.vision_total_layers = int(self.find_vparam(self.n_block_keys)) + if self.vision_total_layers < 2: + raise ValueError( + f"Phi-4 vision mmproj conversion requires at least 2 vision layers, got {self.vision_total_layers}" + ) + + # Phi-4 uses SigLIP2 hidden_states[-2], so export one fewer encoder block and + # drop post-layernorm/head weights. This makes the GGUF runtime output match + # the feature map consumed by the patched siglip.cpp Phi-4 projector path. + self.vision_export_layers = self.vision_total_layers - 1 + self.vision_last_layer_idx = self.vision_total_layers - 1 + + for key in self.n_block_keys: + if key in self.hparams_vision: + self.hparams_vision[key] = self.vision_export_layers + break + + self.block_count = self.vision_export_layers + self.tensor_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.MMPROJ, self.block_count) + + patch_size = self.preprocessor_config.get("patch_size") + if patch_size is None: + raise KeyError("Phi-4 vision mmproj conversion requires patch_size in preprocessor_config.json") + + self.hparams_vision["patch_size"] = patch_size + + pos_emb_name = next( + ( + name for name in self.model_tensors + if name.endswith("vision_model.embeddings.position_embedding.weight") + ), + None, + ) + if pos_emb_name is None: + raise KeyError("Phi-4 vision mmproj conversion could not find position_embedding.weight") + + pos_emb_shape = self.model_tensors[pos_emb_name]().shape + base_grid_tokens = int(pos_emb_shape[0]) + grid_side = math.isqrt(base_grid_tokens) + if grid_side * grid_side != base_grid_tokens: + raise ValueError(f"Unexpected Phi-4 position embedding shape: {tuple(pos_emb_shape)}") + + self.hparams_vision["image_size"] = grid_side * patch_size + + min_num_patches = self.preprocessor_config.get("min_num_patches", self.global_config.get("min_num_patches")) + max_num_patches = self.preprocessor_config.get("max_num_patches", self.global_config.get("max_num_patches")) + if min_num_patches is None or max_num_patches is None: + raise KeyError("Phi-4 vision mmproj conversion requires min_num_patches and max_num_patches") + + self.min_pixels = int(min_num_patches) * patch_size * patch_size + self.max_pixels = int(max_num_patches) * patch_size * patch_size + + def set_gguf_parameters(self): + super().set_gguf_parameters() + assert self.hparams_vision is not None + + self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.PHI4) + self.gguf_writer.add_vision_min_pixels(self.min_pixels) + self.gguf_writer.add_vision_max_pixels(self.max_pixels) + self.gguf_writer.add_vision_use_gelu(True) + self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams_vision.get("layer_norm_eps", 1e-6)) + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + if name.startswith(("model.vision_tower.vision_tower.", "vision_tower.")): + if ".vision_model.head." in name: + return + + new_name = name.replace("model.vision_tower.vision_tower.", "vision_tower.") + + if ".vision_model.post_layernorm." 
in new_name: + return + + if bid is not None and bid == self.vision_last_layer_idx: + return + + if new_name.endswith("vision_model.embeddings.patch_embedding.weight"): + assert self.hparams_vision is not None + if data_torch.ndim != 2: + raise ValueError(f"Unexpected Phi-4 patch embedding shape: {tuple(data_torch.shape)}") + + patch_area = self.hparams_vision["patch_size"] ** 2 + in_features = data_torch.shape[1] + if in_features % patch_area != 0: + raise ValueError( + f"Phi-4 patch embedding input dim {in_features} is not divisible by patch area {patch_area}" + ) + + num_channels = in_features // patch_area + patch_size = self.hparams_vision["patch_size"] + data_torch = data_torch.view(data_torch.shape[0], patch_size, patch_size, num_channels) + data_torch = data_torch.permute(0, 3, 1, 2) + + yield from super().modify_tensors(data_torch, new_name, bid) + return + + if name.startswith(("model.mm_projector.", "mm_projector.")): + local_name = name + local_name = local_name.replace("model.mm_projector.", "") + local_name = local_name.replace("mm_projector.", "") + + if not (local_name.startswith("0.") or local_name.startswith("2.")): + return + + suffix = ".bias" if local_name.endswith(".bias") else ".weight" + mm_idx = int(local_name.split(".", maxsplit=1)[0]) + yield (self.format_tensor_name(gguf.MODEL_TENSOR.V_MMPROJ, mm_idx, suffix=suffix), data_torch) + return + + return + @ModelBase.register("PhiMoEForCausalLM") class PhiMoeModel(Phi3MiniModel): @@ -9217,7 +9493,9 @@ class ChatGLMModel(TextModel): def set_gguf_parameters(self): n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) + assert n_embed is not None n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) + assert n_head is not None n_head_kv = self.hparams.get("multi_query_group_num", self.hparams.get("num_key_value_heads", n_head)) self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed)) self.gguf_writer.add_embedding_length(n_embed) @@ -9824,9 +10102,9 @@ class NemotronHModel(GraniteHybridModel): # Skip Multi-Token Prediction (MTP) tensors. These are used for # for speculative decoding but we don't include them in this model # conversion. 
See https://github.com/ggml-org/llama.cpp/pull/18886 - if "mtp" in name: + if name.startswith("mtp."): logger.info(f"gguf: Skipping MTP (Speculative) layer: {name}") - return [] + return if name.endswith("mixer.gate.e_score_correction_bias"): new_name = name.replace("e_score_correction_bias", "e_score_correction.bias") diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h index 7bf45a4c0..cc839340e 100644 --- a/ggml/include/ggml.h +++ b/ggml/include/ggml.h @@ -433,7 +433,8 @@ extern "C" { // GGML_TYPE_IQ4_NL_4_8 = 37, // GGML_TYPE_IQ4_NL_8_8 = 38, GGML_TYPE_MXFP4 = 39, // MXFP4 (1 block) - GGML_TYPE_COUNT = 40, + GGML_TYPE_NVFP4 = 40, // NVFP4 (4 blocks, E4M3 scale) + GGML_TYPE_COUNT = 41, }; // precision @@ -469,6 +470,7 @@ extern "C" { GGML_FTYPE_MOSTLY_IQ1_M = 23, // except 1d tensors GGML_FTYPE_MOSTLY_BF16 = 24, // except 1d tensors GGML_FTYPE_MOSTLY_MXFP4 = 25, // except 1d tensors + GGML_FTYPE_MOSTLY_NVFP4 = 26, // except 1d tensors }; // available tensor operations: @@ -2482,6 +2484,8 @@ extern "C" { bool lower, bool uni); + // TODO: add ggml_gated_delta_net_set_bcast() to be able to configure Q, K broadcast type: tiled vs interleaved [TAG_GGML_GDN_BCAST] + // ref: https://github.com/ggml-org/llama.cpp/pull/19468#discussion_r2786394306 GGML_API struct ggml_tensor * ggml_gated_delta_net( struct ggml_context * ctx, struct ggml_tensor * q, diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp index b3ab9ebb8..cc9196899 100644 --- a/ggml/src/ggml-backend.cpp +++ b/ggml/src/ggml-backend.cpp @@ -1462,10 +1462,6 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s int split_backend_id = split->backend_id; ggml_backend_t split_backend = sched->backends[split_backend_id]; - if (sched->events[split_backend_id][sched->cur_copy] == NULL) { - ggml_backend_synchronize(split_backend); - } - // copy the input tensors to the split backend for (int input_id = 0; input_id < split->n_inputs; input_id++) { ggml_backend_t input_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[input_id]); @@ -1476,12 +1472,16 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s // inputs from the user must be copied immediately to prevent the user overwriting the data before the copy is done if (sched->events[split_backend_id][sched->cur_copy] != NULL) { ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]); + } else { + ggml_backend_synchronize(split_backend); } - ggml_backend_tensor_copy_async(input_backend, split_backend, input, input_cpy); + ggml_backend_tensor_copy(input, input_cpy); } else { // wait for the split backend to finish using the input before overwriting it if (sched->events[split_backend_id][sched->cur_copy] != NULL) { ggml_backend_event_wait(split_backend, sched->events[split_backend_id][sched->cur_copy]); + } else { + ggml_backend_synchronize(split_backend); } // when offloading MoE weights, we can reduce the amount of data copied by copying only the experts that are used @@ -1585,10 +1585,6 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s } } - if (sched->events[split_backend_id][sched->cur_copy] == NULL) { - ggml_backend_synchronize(split_backend); - } - if (!sched->callback_eval) { enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &split->graph); if (ec != GGML_STATUS_SUCCESS) { diff --git a/ggml/src/ggml-common.h b/ggml/src/ggml-common.h index 93ab7ea44..92cf739e7 100644 --- a/ggml/src/ggml-common.h +++ 
b/ggml/src/ggml-common.h @@ -102,6 +102,9 @@ typedef sycl::half2 ggml_half2; #define QI_MXFP4 (QK_MXFP4 / (4 * QR_MXFP4)) #define QR_MXFP4 2 +#define QI_NVFP4 (QK_NVFP4 / (4 * QR_NVFP4)) +#define QR_NVFP4 2 + #define QI5_0 (QK5_0 / (4 * QR5_0)) #define QR5_0 2 @@ -194,6 +197,14 @@ typedef struct { } block_mxfp4; static_assert(sizeof(block_mxfp4) == sizeof(uint8_t) + QK_MXFP4/2, "wrong mxfp4 block size/padding"); +#define QK_NVFP4 64 +#define QK_NVFP4_SUB 16 // sub-block size for per-group scales +typedef struct { + uint8_t d[QK_NVFP4/QK_NVFP4_SUB]; // UE4M3 scales (4 bytes, one per 16-element sub-block) + uint8_t qs[QK_NVFP4/2]; // packed 4-bit E2M1 values (32 bytes) +} block_nvfp4; +static_assert(sizeof(block_nvfp4) == sizeof(uint8_t)*(QK_NVFP4/QK_NVFP4_SUB) + QK_NVFP4/2, "wrong nvfp4 block size/padding"); + #define QK5_0 32 typedef struct { ggml_half d; // delta diff --git a/ggml/src/ggml-cpu/arch-fallback.h b/ggml/src/ggml-cpu/arch-fallback.h index 964c23eb9..0286105d2 100644 --- a/ggml/src/ggml-cpu/arch-fallback.h +++ b/ggml/src/ggml-cpu/arch-fallback.h @@ -15,6 +15,7 @@ #define ggml_vec_dot_q5_1_q8_1_generic ggml_vec_dot_q5_1_q8_1 #define ggml_vec_dot_q8_0_q8_0_generic ggml_vec_dot_q8_0_q8_0 #define ggml_vec_dot_mxfp4_q8_0_generic ggml_vec_dot_mxfp4_q8_0 +#define ggml_vec_dot_nvfp4_q8_0_generic ggml_vec_dot_nvfp4_q8_0 #define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K #define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K #define ggml_vec_dot_q2_K_q8_K_generic ggml_vec_dot_q2_K_q8_K @@ -69,6 +70,8 @@ #define ggml_gemv_q2_K_8x8_q8_K_generic ggml_gemv_q2_K_8x8_q8_K #define ggml_gemm_q2_K_8x8_q8_K_generic ggml_gemm_q2_K_8x8_q8_K #elif defined(__x86_64__) || defined(__i386__) || defined(_M_IX86) || defined(_M_X64) +// quants.c +#define ggml_vec_dot_nvfp4_q8_0_generic ggml_vec_dot_nvfp4_q8_0 // repack.cpp #define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 #define ggml_quantize_mat_q8_K_4x4_generic ggml_quantize_mat_q8_K_4x4 @@ -96,6 +99,7 @@ // ref: https://github.com/ggml-org/llama.cpp/pull/14146#issuecomment-2972561679 // quants.c #define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_nvfp4_q8_0_generic ggml_vec_dot_nvfp4_q8_0 #define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K #define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K #define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K @@ -137,6 +141,7 @@ #define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K #define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K #define ggml_vec_dot_mxfp4_q8_0_generic ggml_vec_dot_mxfp4_q8_0 +#define ggml_vec_dot_nvfp4_q8_0_generic ggml_vec_dot_nvfp4_q8_0 // repack.cpp #define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 #define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 @@ -177,6 +182,7 @@ #define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 #define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K #define ggml_vec_dot_mxfp4_q8_0_generic ggml_vec_dot_mxfp4_q8_0 +#define ggml_vec_dot_nvfp4_q8_0_generic ggml_vec_dot_nvfp4_q8_0 // repack.cpp #define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 #define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 @@ -209,6 +215,7 @@ #elif defined(__s390x__) // quants.c #define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_nvfp4_q8_0_generic ggml_vec_dot_nvfp4_q8_0 #define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K #define 
ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K #define ggml_vec_dot_q2_K_q8_K_generic ggml_vec_dot_q2_K_q8_K @@ -265,6 +272,7 @@ #define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 #define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K #define ggml_vec_dot_mxfp4_q8_0_generic ggml_vec_dot_mxfp4_q8_0 +#define ggml_vec_dot_nvfp4_q8_0_generic ggml_vec_dot_nvfp4_q8_0 // repack.cpp #define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 #define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 diff --git a/ggml/src/ggml-cpu/arch/arm/quants.c b/ggml/src/ggml-cpu/arch/arm/quants.c index a707d6398..c1856201b 100644 --- a/ggml/src/ggml-cpu/arch/arm/quants.c +++ b/ggml/src/ggml-cpu/arch/arm/quants.c @@ -650,6 +650,90 @@ void ggml_vec_dot_mxfp4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo *s = sumf; } +void ggml_vec_dot_nvfp4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK_NVFP4 == 0); + + const block_nvfp4 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + // Each NVFP4 super-block (64 elements) spans 2 q8_0 blocks + const int nb = n / QK_NVFP4; + + float sumf = 0; + +#if defined __ARM_NEON + const int8x16_t values = vld1q_s8(kvalues_mxfp4); + const uint8x16_t m4b = vdupq_n_u8(0x0f); + float32x4_t acc = vdupq_n_f32(0.0f); + + for (int ib = 0; ib < nb; ++ib) { + const uint8x16_t q4bits_0 = vld1q_u8(x[ib].qs); + const uint8x16_t q4bits_1 = vld1q_u8(x[ib].qs + 16); + + const int8x16_t q4_lo_0 = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits_0, m4b)); + const int8x16_t q4_hi_0 = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits_0, 4)); + const int8x16_t q4_lo_1 = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits_1, m4b)); + const int8x16_t q4_hi_1 = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits_1, 4)); + + const int8x16_t q8_0a = vld1q_s8(y[2*ib].qs); + const int8x16_t q8_0b = vld1q_s8(y[2*ib].qs + 16); + const int8x16_t q8_lo_0 = vcombine_s8(vget_low_s8(q8_0a), vget_low_s8(q8_0b)); + const int8x16_t q8_hi_0 = vcombine_s8(vget_high_s8(q8_0a), vget_high_s8(q8_0b)); + + const int8x16_t q8_1a = vld1q_s8(y[2*ib+1].qs); + const int8x16_t q8_1b = vld1q_s8(y[2*ib+1].qs + 16); + const int8x16_t q8_lo_1 = vcombine_s8(vget_low_s8(q8_1a), vget_low_s8(q8_1b)); + const int8x16_t q8_hi_1 = vcombine_s8(vget_high_s8(q8_1a), vget_high_s8(q8_1b)); + + const int32x4_t p0 = vaddq_s32( + ggml_vdotq_s32(vdupq_n_s32(0), q4_lo_0, q8_lo_0), + ggml_vdotq_s32(vdupq_n_s32(0), q4_hi_0, q8_hi_0)); + const int32x4_t p1 = vaddq_s32( + ggml_vdotq_s32(vdupq_n_s32(0), q4_lo_1, q8_lo_1), + ggml_vdotq_s32(vdupq_n_s32(0), q4_hi_1, q8_hi_1)); + + const int32x4_t sums = vpaddq_s32(p0, p1); + + // Decode 4 UE4M3 scales to f32 and multiply with q8 scales + const float dy0 = GGML_CPU_FP16_TO_FP32(y[2*ib].d); + const float dy1 = GGML_CPU_FP16_TO_FP32(y[2*ib+1].d); + const float32x4_t nvsc = { + ggml_ue4m3_to_fp32(x[ib].d[0]), + ggml_ue4m3_to_fp32(x[ib].d[1]), + ggml_ue4m3_to_fp32(x[ib].d[2]), + ggml_ue4m3_to_fp32(x[ib].d[3]) + }; + const float32x4_t scales = vmulq_f32(nvsc, (float32x4_t){dy0, dy0, dy1, dy1}); + + acc = vfmaq_f32(acc, vcvtq_f32_s32(sums), scales); + } + sumf = vaddvq_f32(acc); +#else + for (int ib = 0; ib < nb; ++ib) { + for (int si = 0; si < 4; ++si) { + const float d = ggml_ue4m3_to_fp32(x[ib].d[si]); + const int q8b = si / 2; + const int q8o = (si % 2) * QK_NVFP4_SUB; + const 
float dy = GGML_CPU_FP16_TO_FP32(y[2*ib + q8b].d); + + int sumi_lo = 0, sumi_hi = 0; + for (int j = 0; j < QK_NVFP4_SUB/2; ++j) { + const uint8_t qv = x[ib].qs[si*(QK_NVFP4_SUB/2) + j]; + sumi_lo += y[2*ib + q8b].qs[q8o + j + 0] * kvalues_mxfp4[qv & 0xf]; + sumi_hi += y[2*ib + q8b].qs[q8o + j + QK_NVFP4_SUB/2] * kvalues_mxfp4[qv >> 4]; + } + sumf += dy * d * (sumi_lo + sumi_hi); + } + } +#endif + *s = sumf; +} + void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index 799a24619..728b13753 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -271,6 +271,12 @@ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = { .vec_dot_type = GGML_TYPE_Q8_0, .nrows = 1, }, + [GGML_TYPE_NVFP4] = { + .from_float = quantize_row_nvfp4, + .vec_dot = ggml_vec_dot_nvfp4_q8_0, + .vec_dot_type = GGML_TYPE_Q8_0, + .nrows = 1, + }, [GGML_TYPE_Q2_K] = { .from_float = quantize_row_q2_K, .vec_dot = ggml_vec_dot_q2_K_q8_K, diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index 331e071a2..fa9d27046 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -670,6 +670,7 @@ void ggml_compute_forward_add( case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: case GGML_TYPE_MXFP4: + case GGML_TYPE_NVFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: @@ -1119,6 +1120,7 @@ void ggml_compute_forward_add1( case GGML_TYPE_Q8_0: case GGML_TYPE_Q8_1: case GGML_TYPE_MXFP4: + case GGML_TYPE_NVFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: @@ -1247,6 +1249,7 @@ void ggml_compute_forward_acc( case GGML_TYPE_Q8_0: case GGML_TYPE_Q8_1: case GGML_TYPE_MXFP4: + case GGML_TYPE_NVFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: @@ -4334,6 +4337,7 @@ void ggml_compute_forward_out_prod( case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: case GGML_TYPE_MXFP4: + case GGML_TYPE_NVFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: @@ -4609,6 +4613,7 @@ void ggml_compute_forward_set( case GGML_TYPE_Q8_0: case GGML_TYPE_Q8_1: case GGML_TYPE_MXFP4: + case GGML_TYPE_NVFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: @@ -4831,6 +4836,7 @@ void ggml_compute_forward_get_rows( case GGML_TYPE_Q8_0: case GGML_TYPE_Q8_1: case GGML_TYPE_MXFP4: + case GGML_TYPE_NVFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: @@ -5555,6 +5561,7 @@ void ggml_compute_forward_clamp( case GGML_TYPE_Q8_0: case GGML_TYPE_Q8_1: case GGML_TYPE_MXFP4: + case GGML_TYPE_NVFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: @@ -10436,8 +10443,8 @@ static void ggml_compute_forward_gated_delta_net_one_chunk( const float * state_in_base = (const float *)src_state->data; - const int64_t rq1 = nev1 / neq1; - const int64_t rk1 = nev1 / nek1; + //const int64_t rq1 = nev1 / neq1; + //const int64_t rk1 = nev1 / nek1; const int64_t rq3 = nev3 / neq3; const int64_t rk3 = nev3 / nek3; @@ -10447,8 +10454,8 @@ static void ggml_compute_forward_gated_delta_net_one_chunk( const int64_t iv1 = ir % H; // head_index const int64_t iv3 = ir / H; // sequence - const int64_t iq1 = iv1 / rq1; - const int64_t ik1 = iv1 / rk1; + const int64_t iq1 = iv1 % neq1; + const int64_t ik1 = iv1 % nek1; const int64_t iq3 = iv3 / rq3; const int64_t ik3 = iv3 / rk3; @@ -10468,7 +10475,7 @@ static void 
ggml_compute_forward_gated_delta_net_one_chunk( const float * v_d = (const float *)((const char *)src_v->data + iv3 * nbv3 + t * nbv2 + iv1 * nbv1); const float beta_val = *(const float *)((const char *)src_beta->data + iv3 * nbb3 + t * nbb2 + iv1 * nbb1); - const float * g_d = (const float *)((const char *)src_g->data + iv3 * nbg3 + t * nbg2 + iv1 * nbg1); + const float * g_d = (const float *)((const char *)src_g->data + iv3 * nbg3 + t * nbg2 + iv1 * nbg1); if (kda) { for (int64_t i = 0; i < S_v; ++i) { @@ -10501,7 +10508,6 @@ static void ggml_compute_forward_gated_delta_net_one_chunk( attn_data += S_v * H; // advance to next token } - } } diff --git a/ggml/src/ggml-cpu/quants.c b/ggml/src/ggml-cpu/quants.c index 365cb36d2..7ebbb9c6f 100644 --- a/ggml/src/ggml-cpu/quants.c +++ b/ggml/src/ggml-cpu/quants.c @@ -50,6 +50,10 @@ void quantize_row_mxfp4(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, i quantize_row_mxfp4_ref(x, y, k); } +void quantize_row_nvfp4(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_nvfp4_ref(x, y, k); +} + // // 2-6 bit quantization in super-blocks // @@ -216,6 +220,42 @@ void ggml_vec_dot_mxfp4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, *s = sumf; } +// NVFP4: super-block of 64 elements = 4 sub-blocks of 16 = 2 q8_0 blocks +void ggml_vec_dot_nvfp4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK_NVFP4 == 0); + + const block_nvfp4 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + const int nb = n / QK_NVFP4; + + float sumf = 0; + + for (int ib = 0; ib < nb; ++ib) { + for (int s_idx = 0; s_idx < 4; ++s_idx) { + const float d = ggml_ue4m3_to_fp32(x[ib].d[s_idx]); + const int q8_block = s_idx / 2; + const int q8_off = (s_idx % 2) * QK_NVFP4_SUB; + const float dy = GGML_CPU_FP16_TO_FP32(y[2*ib + q8_block].d); + + int sumi_lo = 0, sumi_hi = 0; + for (int j = 0; j < QK_NVFP4_SUB/2; ++j) { + const uint8_t qv = x[ib].qs[s_idx*(QK_NVFP4_SUB/2) + j]; + sumi_lo += y[2*ib + q8_block].qs[q8_off + j + 0] * kvalues_mxfp4[qv & 0xf]; + sumi_hi += y[2*ib + q8_block].qs[q8_off + j + QK_NVFP4_SUB/2] * kvalues_mxfp4[qv >> 4]; + } + + sumf += dy * d * (sumi_lo + sumi_hi); + } + } + *s = sumf; +} + void ggml_vec_dot_q5_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; const int nb = n / qk; diff --git a/ggml/src/ggml-cpu/quants.h b/ggml/src/ggml-cpu/quants.h index d83eb1b14..3584aaa43 100644 --- a/ggml/src/ggml-cpu/quants.h +++ b/ggml/src/ggml-cpu/quants.h @@ -20,6 +20,7 @@ void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, in void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_mxfp4(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_nvfp4(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_q2_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_q3_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); @@ -42,6 +43,7 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t 
bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_mxfp4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_nvfp4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); @@ -73,6 +75,7 @@ void ggml_vec_dot_q5_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, c void ggml_vec_dot_q8_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_mxfp4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_nvfp4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_tq1_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_tq2_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); diff --git a/ggml/src/ggml-cuda/gated_delta_net.cu b/ggml/src/ggml-cuda/gated_delta_net.cu index c249bbc86..5f0fa8e58 100644 --- a/ggml/src/ggml-cuda/gated_delta_net.cu +++ b/ggml/src/ggml-cuda/gated_delta_net.cu @@ -1,36 +1,36 @@ #include "gated_delta_net.cuh" -#include "ggml-cuda/common.cuh" template -__global__ void __launch_bounds__(S_v, 1) -gated_delta_net_cuda(const float * q, - const float * k, - const float * v, - const float * g, - const float * beta, - const float * curr_state, - float * dst, - const int64_t H, - const int64_t n_tokens, - const int64_t n_seqs, - const int64_t sq1, - const int64_t sq2, - const int64_t sq3, - const int64_t sv1, - const int64_t sv2, - const int64_t sv3, - const int64_t sb1, - const int64_t sb2, - const int64_t sb3, - const int64_t rq1, - const int64_t rq3, - const float scale) { - const int64_t h_idx = blockIdx.x; - const int64_t sequence = blockIdx.y; - const int col = threadIdx.x; // each thread owns one column +__global__ void gated_delta_net_cuda(const float * q, + const float * k, + const float * v, + const float * g, + const float * beta, + const float * curr_state, + float * dst, + int64_t H, + int64_t n_tokens, + int64_t n_seqs, + int64_t sq1, + int64_t sq2, + int64_t sq3, + int64_t sv1, + int64_t sv2, + int64_t sv3, + int64_t sb1, + int64_t sb2, + int64_t sb3, + const uint3 neqk1_magic, + const uint3 rq3_magic, + float scale) { + const uint32_t h_idx = blockIdx.x; + const uint32_t sequence = blockIdx.y; + // each warp owns one column, using warp-level primitives to reduce across rows + const int lane = threadIdx.x; + const int col = blockIdx.z * blockDim.y + threadIdx.y; - const int64_t iq1 = h_idx / rq1; - const int64_t iq3 = sequence / rq3; + const uint32_t iq1 = fastmodulo(h_idx, neqk1_magic); + const uint32_t iq3 = fastdiv(sequence, rq3_magic); const int64_t attn_score_elems = S_v 
* H * n_tokens * n_seqs; float * attn_data = dst; @@ -41,17 +41,14 @@ gated_delta_net_cuda(const float * q, curr_state += state_offset; attn_data += (sequence * n_tokens * H + h_idx) * S_v; - // GCN and CDNA devices spill registers, we use shared mem for them. See https://github.com/ggml-org/llama.cpp/pull/20282#issuecomment-4025770229 - // TODO: check optimal path for RDNA1 and RDNA2 devices. -#if (defined(GGML_USE_HIP) && !defined(RDNA3) && !defined(RDNA4)) || defined(GGML_USE_MUSA) - extern __shared__ float s_shared[]; - float * s = s_shared + col * S_v; -#else - float s[S_v]; -#endif + constexpr int warp_size = ggml_cuda_get_physical_warp_size() < S_v ? ggml_cuda_get_physical_warp_size() : S_v; + static_assert(S_v % warp_size == 0, "S_v must be a multiple of warp_size"); + constexpr int rows_per_lane = (S_v + warp_size - 1) / warp_size; + float s_shard[rows_per_lane]; #pragma unroll - for (int i = 0; i < S_v; i++) { - s[i] = curr_state[i * S_v + col]; + for (int r = 0; r < rows_per_lane; r++) { + const int i = r * warp_size + lane; + s_shard[r] = curr_state[i * S_v + col]; } for (int t = 0; t < n_tokens; t++) { @@ -69,46 +66,61 @@ gated_delta_net_cuda(const float * q, const float g_val = expf(*g_t); // kv[col] = (S^T @ k)[col] = sum_i S[i][col] * k[i] - float kv_col = 0.0f; + float kv_shard = 0.0f; #pragma unroll - for (int i = 0; i < S_v; i++) { - kv_col += s[i] * k_t[i]; + for (int r = 0; r < rows_per_lane; r++) { + const int i = r * warp_size + lane; + kv_shard += s_shard[r] * k_t[i]; } + float kv_col = warp_reduce_sum(kv_shard); // delta[col] = (v[col] - g * kv[col]) * beta float delta_col = (v_t[col] - g_val * kv_col) * beta_val; // fused: S[i][col] = g * S[i][col] + k[i] * delta[col] // attn[col] = (S^T @ q)[col] = sum_i S[i][col] * q[i] - float attn_col = 0.0f; + float attn_partial = 0.0f; #pragma unroll - for (int i = 0; i < S_v; i++) { - s[i] = g_val * s[i] + k_t[i] * delta_col; - attn_col += s[i] * q_t[i]; + for (int r = 0; r < rows_per_lane; r++) { + const int i = r * warp_size + lane; + s_shard[r] = g_val * s_shard[r] + k_t[i] * delta_col; + attn_partial += s_shard[r] * q_t[i]; } - attn_data[col] = attn_col * scale; + float attn_col = warp_reduce_sum(attn_partial); + + if (lane == 0) { + attn_data[col] = attn_col * scale; + } } else { // kv[col] = sum_i g[i] * S[i][col] * k[i] - float kv_col = 0.0f; + float kv_shard = 0.0f; #pragma unroll - for (int i = 0; i < S_v; i++) { - kv_col += expf(g_t[i]) * s[i] * k_t[i]; + for (int r = 0; r < rows_per_lane; r++) { + const int i = r * warp_size + lane; + kv_shard += expf(g_t[i]) * s_shard[r] * k_t[i]; } + float kv_col = warp_reduce_sum(kv_shard); + // delta[col] = (v[col] - kv[col]) * beta float delta_col = (v_t[col] - kv_col) * beta_val; // fused: S[i][col] = g[i] * S[i][col] + k[i] * delta[col] // attn[col] = (S^T @ q)[col] = sum_i S[i][col] * q[i] - float attn_col = 0.0f; + float attn_partial = 0.0f; #pragma unroll - for (int i = 0; i < S_v; i++) { - s[i] = expf(g_t[i]) * s[i] + k_t[i] * delta_col; - attn_col += s[i] * q_t[i]; + for (int r = 0; r < rows_per_lane; r++) { + const int i = r * warp_size + lane; + s_shard[r] = expf(g_t[i]) * s_shard[r] + k_t[i] * delta_col; + attn_partial += s_shard[r] * q_t[i]; } - attn_data[col] = attn_col * scale; + float attn_col = warp_reduce_sum(attn_partial); + + if (lane == 0) { + attn_data[col] = attn_col * scale; + } } attn_data += S_v * H; @@ -116,8 +128,9 @@ gated_delta_net_cuda(const float * q, // Write state back to global memory #pragma unroll - for (int i = 0; i < S_v; i++) { - state[i 
* S_v + col] = s[i]; + for (int r = 0; r < rows_per_lane; r++) { + const int i = r * warp_size + lane; + state[i * S_v + col] = s_shard[r]; } } @@ -135,35 +148,43 @@ static void launch_gated_delta_net( const float * q_d, const float * k_d, const float * v_d, const float * g_d, const float * b_d, const float * s_d, float * dst_d, - int64_t S_v, int64_t H, int64_t n_tokens, int64_t n_seqs, - int64_t sq1, int64_t sq2, int64_t sq3, - int64_t sv1, int64_t sv2, int64_t sv3, - int64_t sb1, int64_t sb2, int64_t sb3, - int64_t rq1, int64_t rq3, + int64_t S_v, int64_t H, int64_t n_tokens, int64_t n_seqs, + int64_t sq1, int64_t sq2, int64_t sq3, + int64_t sv1, int64_t sv2, int64_t sv3, + int64_t sb1, int64_t sb2, int64_t sb3, + int64_t neqk1, int64_t rq3, float scale, cudaStream_t stream) { + //TODO: Add chunked kernel for even faster pre-fill + const int warp_size = ggml_cuda_info().devices[ggml_cuda_get_device()].warp_size; + const int num_warps = 4; + dim3 grid_dims(H, n_seqs, (S_v + num_warps - 1) / num_warps); + dim3 block_dims(warp_size <= S_v ? warp_size : S_v, num_warps, 1); - dim3 grid_dims(H, n_seqs, 1); - dim3 block_dims(S_v, 1, 1); + const uint3 neqk1_magic = init_fastdiv_values(neqk1); + const uint3 rq3_magic = init_fastdiv_values(rq3); int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; switch (S_v) { - case 32: { - constexpr int sv = 32; - size_t smem = calculate_smem(sv, cc); - gated_delta_net_cuda<<>>( + case 16: + gated_delta_net_cuda<16, KDA><<>>( q_d, k_d, v_d, g_d, b_d, s_d, dst_d, H, n_tokens, n_seqs, sq1, sq2, sq3, sv1, sv2, sv3, - sb1, sb2, sb3, rq1, rq3, scale); + sb1, sb2, sb3, neqk1_magic, rq3_magic, scale); + break; + case 32: + gated_delta_net_cuda<32, KDA><<>>( + q_d, k_d, v_d, g_d, b_d, s_d, dst_d, H, + n_tokens, n_seqs, sq1, sq2, sq3, sv1, sv2, sv3, + sb1, sb2, sb3, neqk1_magic, rq3_magic, scale); break; - } case 64: { constexpr int sv = 64; size_t smem = calculate_smem(sv, cc); gated_delta_net_cuda<<>>( q_d, k_d, v_d, g_d, b_d, s_d, dst_d, H, n_tokens, n_seqs, sq1, sq2, sq3, sv1, sv2, sv3, - sb1, sb2, sb3, rq1, rq3, scale); + sb1, sb2, sb3, neqk1_magic, rq3_magic, scale); break; } case 128: { @@ -172,7 +193,7 @@ static void launch_gated_delta_net( gated_delta_net_cuda<<>>( q_d, k_d, v_d, g_d, b_d, s_d, dst_d, H, n_tokens, n_seqs, sq1, sq2, sq3, sv1, sv2, sv3, - sb1, sb2, sb3, rq1, rq3, scale); + sb1, sb2, sb3, neqk1_magic, rq3_magic, scale); break; } default: @@ -190,10 +211,12 @@ void ggml_cuda_op_gated_delta_net(ggml_backend_cuda_context & ctx, ggml_tensor * ggml_tensor * src_state = dst->src[5]; GGML_TENSOR_LOCALS(int64_t, neq, src_q, ne); - GGML_TENSOR_LOCALS(size_t, nbq, src_q, nb); + GGML_TENSOR_LOCALS(size_t , nbq, src_q, nb); + GGML_TENSOR_LOCALS(int64_t, nek, src_k, ne); + GGML_TENSOR_LOCALS(size_t , nbk, src_k, nb); GGML_TENSOR_LOCALS(int64_t, nev, src_v, ne); - GGML_TENSOR_LOCALS(size_t, nbv, src_v, nb); - GGML_TENSOR_LOCALS(size_t, nbb, src_beta, nb); + GGML_TENSOR_LOCALS(size_t, nbv, src_v, nb); + GGML_TENSOR_LOCALS(size_t, nbb, src_beta, nb); const int64_t S_v = nev0; const int64_t H = nev1; @@ -202,7 +225,9 @@ void ggml_cuda_op_gated_delta_net(ggml_backend_cuda_context & ctx, ggml_tensor * const bool kda = (src_g->ne[0] == S_v); - const int64_t rq1 = nev1 / neq1; + GGML_ASSERT(neq1 == nek1); + const int64_t neqk1 = neq1; + const int64_t rq3 = nev3 / neq3; const float * q_d = (const float *) src_q->data; @@ -241,10 +266,10 @@ void ggml_cuda_op_gated_delta_net(ggml_backend_cuda_context & ctx, ggml_tensor * if (kda) { launch_gated_delta_net(q_d, 
k_d, v_d, g_d, b_d, s_d, dst_d, S_v, H, n_tokens, n_seqs, sq1, sq2, sq3, sv1, sv2, sv3, - sb1, sb2, sb3, rq1, rq3, scale, stream); + sb1, sb2, sb3, neqk1, rq3, scale, stream); } else { launch_gated_delta_net(q_d, k_d, v_d, g_d, b_d, s_d, dst_d, S_v, H, n_tokens, n_seqs, sq1, sq2, sq3, sv1, sv2, sv3, - sb1, sb2, sb3, rq1, rq3, scale, stream); + sb1, sb2, sb3, neqk1, rq3, scale, stream); } } diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 9d578dca1..481fffe27 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -2835,14 +2835,11 @@ static bool ggml_backend_cuda_cpy_tensor_async(ggml_backend_t backend_src, ggml_ ggml_backend_buffer_t buf_src = src->view_src ? src->view_src->buffer : src->buffer; ggml_backend_buffer_t buf_dst = dst->view_src ? dst->view_src->buffer : dst->buffer; - //enables async copies from CPU to CUDA, instead of only CUDA-to-CUDA - bool copy_from_host = ggml_backend_buffer_is_host(buf_src) && ggml_backend_dev_type(backend_src->device) == GGML_BACKEND_DEVICE_TYPE_CPU; - - if (!(copy_from_host || ggml_backend_is_cuda(backend_src)) || !ggml_backend_is_cuda(backend_dst)) { + if (!ggml_backend_is_cuda(backend_src) || !ggml_backend_is_cuda(backend_dst)) { return false; } - if (!(copy_from_host || ggml_backend_buffer_is_cuda(buf_src)) || !ggml_backend_buffer_is_cuda(dst->buffer)) { + if (!ggml_backend_buffer_is_cuda(src->buffer) || !ggml_backend_buffer_is_cuda(dst->buffer)) { return false; } @@ -2853,17 +2850,14 @@ static bool ggml_backend_cuda_cpy_tensor_async(ggml_backend_t backend_src, ggml_ ggml_backend_cuda_buffer_context * buf_ctx_src = (ggml_backend_cuda_buffer_context *)buf_src->context; ggml_backend_cuda_buffer_context * buf_ctx_dst = (ggml_backend_cuda_buffer_context *)buf_dst->context; - if ((copy_from_host && cuda_ctx_dst->device != buf_ctx_dst->device) || - !copy_from_host && (cuda_ctx_src->device != buf_ctx_src->device || cuda_ctx_dst->device != buf_ctx_dst->device)) { + if (cuda_ctx_src->device != buf_ctx_src->device || cuda_ctx_dst->device != buf_ctx_dst->device) { #ifndef NDEBUG GGML_LOG_DEBUG("%s: backend and buffer devices do not match\n", __func__); #endif return false; } - if (copy_from_host) { - CUDA_CHECK(cudaMemcpyAsync(dst->data, src->data, ggml_nbytes(dst), cudaMemcpyHostToDevice, cuda_ctx_dst->stream())); - } else if (backend_src != backend_dst) { + if (backend_src != backend_dst) { // copy on src stream if (cuda_ctx_src->device == cuda_ctx_dst->device) { CUDA_CHECK(cudaMemcpyAsync(dst->data, src->data, ggml_nbytes(dst), cudaMemcpyDeviceToDevice, cuda_ctx_src->stream())); diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h index e3714b38a..925686559 100644 --- a/ggml/src/ggml-impl.h +++ b/ggml/src/ggml-impl.h @@ -491,6 +491,61 @@ static inline float ggml_e8m0_to_fp32_half(uint8_t x) { #define GGML_E8M0_TO_FP32(x) ggml_e8m0_to_fp32(x) #define GGML_E8M0_TO_FP32_HALF(x) ggml_e8m0_to_fp32_half(x) +// UE4M3: unsigned, 4 exp bits (bias=7), 3 mantissa bits +// Returns value * 0.5 to match kvalues_mxfp4 convention (kvalues = 2 * E2M1_float) +static inline float ggml_ue4m3_to_fp32(uint8_t x) { + if (x == 0 || x == 0x7F) { + return 0.0f; + } + int exp = (x >> 3) & 0xF; + int man = x & 0x7; + float raw; + if (exp == 0) { + raw = ldexpf((float) man, -9); + } else { + raw = ldexpf(1.0f + (float) man / 8.0f, exp - 7); + } + return raw * 0.5f; +} + +static inline uint8_t ggml_fp32_to_ue4m3(float x) { + if (!(x > 0.0f)) { + return 0; + } + if (x > 448.0f) { + x = 448.0f; + } + uint32_t bits; + 
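+    // bit-copy the float so its exponent and top mantissa bits can be read directly below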
memcpy(&bits, &x, 4); + int fp32_exp = ((bits >> 23) & 0xFF) - 127; + int fp32_man = (bits >> 20) & 0x7; + int ue4m3_exp = fp32_exp + 7; + if (ue4m3_exp <= 0) { + // subnormal: value = man * 2^-9, man = round(x * 2^9) + int man = (int) (x * 512.0f + 0.5f); + if (man > 7) { + man = 7; + } + if (man < 1) { + return 0; + } + return (uint8_t) man; + } + if (ue4m3_exp >= 15) { + return 0x7E; + } + int round_bit = (bits >> 19) & 1; + int ue4m3_man = fp32_man + round_bit; + if (ue4m3_man > 7) { + ue4m3_man = 0; + ue4m3_exp++; + if (ue4m3_exp >= 15) { + return 0x7E; + } + } + return (uint8_t) ((ue4m3_exp << 3) | ue4m3_man); +} + /** * Converts brain16 to float32. * diff --git a/ggml/src/ggml-metal/ggml-metal-context.m b/ggml/src/ggml-metal/ggml-metal-context.m index 855fd1ada..32d97cd5d 100644 --- a/ggml/src/ggml-metal/ggml-metal-context.m +++ b/ggml/src/ggml-metal/ggml-metal-context.m @@ -554,7 +554,7 @@ enum ggml_status ggml_metal_graph_compute(ggml_metal_t ctx, struct ggml_cgraph * // enter here only when capturing in order to wait for all computation to finish // otherwise, we leave the graph to compute asynchronously - if (!use_capture && ctx->capture_started) { + if (use_capture && ctx->capture_started) { // wait for completion and check status of each command buffer // needed to detect if the device ran out-of-memory for example (#1881) { @@ -606,6 +606,8 @@ enum ggml_status ggml_metal_graph_compute(ggml_metal_t ctx, struct ggml_cgraph * [ctx->capture_scope endScope]; [[MTLCaptureManager sharedCaptureManager] stopCapture]; + + ctx->capture_started = false; } } diff --git a/ggml/src/ggml-metal/ggml-metal-device.cpp b/ggml/src/ggml-metal/ggml-metal-device.cpp index 169c63dd7..72ad876d5 100644 --- a/ggml/src/ggml-metal/ggml-metal-device.cpp +++ b/ggml/src/ggml-metal/ggml-metal-device.cpp @@ -577,6 +577,41 @@ ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_rwkv(ggml_metal_ return res; } +ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_gated_delta_net(ggml_metal_library_t lib, const ggml_tensor * op) { + char base[256]; + char name[256]; + + // v is src[2], dimensions: S_v = ne[0], H = ne[1] + const int ne20 = op->src[2]->ne[0]; // S_v + const int ne21 = op->src[2]->ne[1]; // H + const int ne30 = op->src[3]->ne[0]; // G + + const int nsg = op->src[2]->ne[0]/32; + + GGML_ASSERT(op->src[5]->type == GGML_TYPE_F32); + GGML_ASSERT(op->ne[0] == ne20 * ne21); + GGML_ASSERT(ne20 % 32 == 0); + + snprintf(base, 256, "kernel_gated_delta_net_%s_%d", ggml_type_name(op->src[0]->type), nsg); + snprintf(name, 256, "%s_ne20=%d_ne30=%d", base, ne20, ne30); + + ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); + if (!res.pipeline) { + ggml_metal_cv_t cv = ggml_metal_cv_init(); + + ggml_metal_cv_set_int16(cv, ne20, FC_GATED_DELTA_NET + 0); + ggml_metal_cv_set_int16(cv, ne30, FC_GATED_DELTA_NET + 1); + + res = ggml_metal_library_compile_pipeline(lib, base, name, cv); + + ggml_metal_cv_free(cv); + } + + res.nsg = nsg; + + return res; +} + ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_solve_tri(ggml_metal_library_t lib, const ggml_tensor * op) { char base[256]; char name[256]; @@ -1435,10 +1470,11 @@ ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_bin(ggml_metal_l const bool is_c4 = (op->src[0]->ne[0] % 4 == 0) && (op->src[1]->ne[0] % 4 == 0); + const bool is_cb = op->src[0]->ne[0] != op->src[1]->ne[0]; const bool is_rb = ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]) && (ggml_nrows(op->src[1]) == 1) && 
ggml_nelements(op) < 65536; snprintf(base, 256, "kernel_bin_fuse_%s_%s_%s%s", t0_str, t1_str, t_str, is_c4 ? "_4" : ""); - snprintf(name, 256, "%s_op=%d_nf=%d_rb=%d", base, op_num, n_fuse, is_rb); + snprintf(name, 256, "%s_op=%d_nf=%d_rb=%d_cb=%d", base, op_num, n_fuse, is_rb, is_cb); ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name); if (!res.pipeline) { @@ -1447,6 +1483,7 @@ ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_bin(ggml_metal_l ggml_metal_cv_set_int16(cv, op_num, FC_BIN + 0); ggml_metal_cv_set_int16(cv, n_fuse, FC_BIN + 1); ggml_metal_cv_set_bool (cv, is_rb, FC_BIN + 2); + ggml_metal_cv_set_bool (cv, is_cb, FC_BIN + 3); res = ggml_metal_library_compile_pipeline(lib, base, name, cv); diff --git a/ggml/src/ggml-metal/ggml-metal-device.h b/ggml/src/ggml-metal/ggml-metal-device.h index 93d7f6a21..fd2b3ddeb 100644 --- a/ggml/src/ggml-metal/ggml-metal-device.h +++ b/ggml/src/ggml-metal/ggml-metal-device.h @@ -125,6 +125,7 @@ struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_conv struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_conv_batched (ggml_metal_library_t lib, const struct ggml_tensor * op, int ssm_conv_bs); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_scan (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_rwkv (ggml_metal_library_t lib, const struct ggml_tensor * op); +struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_gated_delta_net (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_solve_tri (ggml_metal_library_t lib, const struct ggml_tensor * op); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mv_ext (ggml_metal_library_t lib, enum ggml_type tsrc0, enum ggml_type tsrc1, int nsg, int nxpsg, int r1ptg); struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mm (ggml_metal_library_t lib, const struct ggml_tensor * op); diff --git a/ggml/src/ggml-metal/ggml-metal-device.m b/ggml/src/ggml-metal/ggml-metal-device.m index e75c1266a..d020df220 100644 --- a/ggml/src/ggml-metal/ggml-metal-device.m +++ b/ggml/src/ggml-metal/ggml-metal-device.m @@ -1161,10 +1161,12 @@ bool ggml_metal_device_supports_op(ggml_metal_device_t dev, const struct ggml_te case GGML_OP_RWKV_WKV6: case GGML_OP_RWKV_WKV7: return true; + case GGML_OP_GATED_DELTA_NET: + return has_simdgroup_reduction && op->src[2]->ne[0] % 32 == 0; case GGML_OP_SOLVE_TRI: case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: - return has_simdgroup_reduction; + return has_simdgroup_reduction && op->src[0]->type != GGML_TYPE_NVFP4; case GGML_OP_SET: case GGML_OP_CPY: case GGML_OP_DUP: @@ -1222,7 +1224,7 @@ bool ggml_metal_device_supports_op(ggml_metal_device_t dev, const struct ggml_te }; } case GGML_OP_GET_ROWS: - return true; + return op->src[0]->type != GGML_TYPE_NVFP4; case GGML_OP_SET_ROWS: { if (op->src[0]->type != GGML_TYPE_F32) { diff --git a/ggml/src/ggml-metal/ggml-metal-impl.h b/ggml/src/ggml-metal/ggml-metal-impl.h index 99d64efc3..53437b23c 100644 --- a/ggml/src/ggml-metal/ggml-metal-impl.h +++ b/ggml/src/ggml-metal/ggml-metal-impl.h @@ -84,6 +84,7 @@ #define FC_BIN 1300 #define FC_SUM_ROWS 1400 #define FC_UPSCALE 1500 +#define FC_GATED_DELTA_NET 1600 // op-specific constants #define OP_FLASH_ATTN_EXT_NQPSG 8 @@ -793,6 +794,44 @@ typedef struct { uint64_t nb0; } 
ggml_metal_kargs_ssm_scan; +typedef struct { + int32_t ne00; + int32_t ne01; + int32_t ne02; + int32_t ne03; + uint64_t nb00; + uint64_t nb01; + uint64_t nb02; + uint64_t nb03; + int32_t ne10; + int32_t ne11; + int32_t ne12; + int32_t ne13; + uint64_t nb10; + uint64_t nb11; + uint64_t nb12; + uint64_t nb13; + int32_t ne20; + int32_t ne21; + int32_t ne22; + int32_t ne23; + uint64_t nb20; + uint64_t nb21; + uint64_t nb22; + uint64_t nb23; + int32_t ns02; + int32_t ns12; + int32_t ns22; + int32_t ne0; + int32_t ne1; + int32_t ne2; + int32_t ne3; + uint64_t nb0; + uint64_t nb1; + uint64_t nb2; + uint64_t nb3; +} ggml_metal_kargs_gated_delta_net; + typedef struct { int32_t ne00; int32_t ne01; diff --git a/ggml/src/ggml-metal/ggml-metal-ops.cpp b/ggml/src/ggml-metal/ggml-metal-ops.cpp index 267755d08..c0bcad392 100644 --- a/ggml/src/ggml-metal/ggml-metal-ops.cpp +++ b/ggml/src/ggml-metal/ggml-metal-ops.cpp @@ -333,6 +333,10 @@ static int ggml_metal_op_encode_impl(ggml_metal_op_t ctx, int idx) { { n_fuse = ggml_metal_op_rwkv(ctx, idx); } break; + case GGML_OP_GATED_DELTA_NET: + { + n_fuse = ggml_metal_op_gated_delta_net(ctx, idx); + } break; case GGML_OP_SOLVE_TRI: { n_fuse = ggml_metal_op_solve_tri(ctx, idx); @@ -1562,6 +1566,81 @@ int ggml_metal_op_rwkv(ggml_metal_op_t ctx, int idx) { return 1; } +int ggml_metal_op_gated_delta_net(ggml_metal_op_t ctx, int idx) { + ggml_tensor * op = ctx->node(idx); + + ggml_metal_library_t lib = ctx->lib; + ggml_metal_encoder_t enc = ctx->enc; + + + GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); + GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); + GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); + GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); + GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne); + GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb); + GGML_TENSOR_LOCALS( int32_t, ne, op, ne); + GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); + + auto pipeline = ggml_metal_library_get_pipeline_gated_delta_net(lib, op); + + int ida = 0; + + ggml_metal_kargs_gated_delta_net args = { + /*.ne00 =*/ ne00, + /*.ne01 =*/ ne01, + /*.ne02 =*/ ne02, + /*.ne03 =*/ ne03, + /*.nb00 =*/ nb00, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.nb03 =*/ nb03, + /*.ne10 =*/ ne10, + /*.ne11 =*/ ne11, + /*.ne12 =*/ ne12, + /*.ne13 =*/ ne13, + /*.nb10 =*/ nb10, + /*.nb11 =*/ nb11, + /*.nb12 =*/ nb12, + /*.nb13 =*/ nb13, + /*.ne20 =*/ ne20, + /*.ne21 =*/ ne21, + /*.ne22 =*/ ne22, + /*.ne23 =*/ ne23, + /*.nb20 =*/ nb20, + /*.nb21 =*/ nb21, + /*.nb22 =*/ nb22, + /*.nb23 =*/ nb23, + /*.ns02 =*/ (int32_t) (nb02/sizeof(float)), + /*.ns12 =*/ (int32_t) (nb12/sizeof(float)), + /*.ns22 =*/ (int32_t) (nb22/sizeof(float)), + /*.ne0 =*/ ne0, + /*.ne1 =*/ ne1, + /*.ne2 =*/ ne2, + /*.ne3 =*/ ne3, + /*.nb0 =*/ nb0, + /*.nb1 =*/ nb1, + /*.nb2 =*/ nb2, + /*.nb3 =*/ nb3, + }; + + ggml_metal_encoder_set_pipeline(enc, pipeline); + ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), ida++); + ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), ida++); // q + ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), ida++); // k + ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[2]), ida++); // v + ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[3]), ida++); // gate + ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[4]), ida++); // beta + ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[5]), ida++); // state + ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), ida++); 
// dst + + const int nsg = pipeline.nsg; + + ggml_metal_encoder_dispatch_threadgroups(enc, op->src[2]->ne[0]/nsg, op->src[2]->ne[1], op->src[2]->ne[3], 32, nsg, 1); + + return 1; +} + int ggml_metal_op_solve_tri(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); @@ -3101,9 +3180,7 @@ int ggml_metal_op_bin(ggml_metal_op_t ctx, int idx) { ggml_metal_encoder_set_buffer (enc, bid_dst, 3); if (pipeline.cnt) { - const int n = pipeline.c4 ? ggml_nelements(op)/4 : ggml_nelements(op); - - ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, 1, 1, 1); + ggml_metal_encoder_dispatch_threadgroups(enc, args.ne0, ggml_nrows(op), 1, 1, 1, 1); } else { const int nth_max = MIN(256, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)); diff --git a/ggml/src/ggml-metal/ggml-metal-ops.h b/ggml/src/ggml-metal/ggml-metal-ops.h index f3e38c7aa..019f2fec9 100644 --- a/ggml/src/ggml-metal/ggml-metal-ops.h +++ b/ggml/src/ggml-metal/ggml-metal-ops.h @@ -58,6 +58,7 @@ int ggml_metal_op_soft_max (ggml_metal_op_t ctx, int idx); int ggml_metal_op_ssm_conv (ggml_metal_op_t ctx, int idx); int ggml_metal_op_ssm_scan (ggml_metal_op_t ctx, int idx); int ggml_metal_op_rwkv (ggml_metal_op_t ctx, int idx); +int ggml_metal_op_gated_delta_net (ggml_metal_op_t ctx, int idx); int ggml_metal_op_solve_tri (ggml_metal_op_t ctx, int idx); int ggml_metal_op_set (ggml_metal_op_t ctx, int idx); int ggml_metal_op_cpy (ggml_metal_op_t ctx, int idx); diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index 29e4a245d..107e7cf2f 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -1111,6 +1111,7 @@ template [[host_name("kernel_unary_f16_f16_4")]] kernel kernel_unary_t kernel_un constant short FC_bin_op [[function_constant(FC_BIN + 0)]]; constant short FC_bin_f [[function_constant(FC_BIN + 1)]]; constant bool FC_bin_rb [[function_constant(FC_BIN + 2)]]; +constant bool FC_bin_cb [[function_constant(FC_BIN + 3)]]; template kernel void kernel_bin_fuse_impl( @@ -1124,11 +1125,12 @@ kernel void kernel_bin_fuse_impl( #define FC_OP FC_bin_op #define FC_F FC_bin_f #define FC_RB FC_bin_rb +#define FC_CB FC_bin_cb if (FC_RB) { // row broadcast - const uint i0 = tgpig.x; - const uint i1 = i0%args.ne10; + const uint i0 = tgpig.y*args.ne00 + tgpig.x; + const uint i1 = FC_CB ? tgpig.x%args.ne10 : tgpig.x; device const T0 * src0_row = (device const T0 *) (src0); device T * dst_row = (device T *) (dst); @@ -1200,7 +1202,7 @@ kernel void kernel_bin_fuse_impl( device const T1 * src1_ptr = (device const T1 *) (src1 + args.o1[0] + i13*args.nb13 + i12*args.nb12 + i11*args.nb11); for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { - const int i10 = i0%args.ne10; + const int i10 = FC_CB ? i0%args.ne10 : i0; if (FC_OP == 0) { dst_ptr[i0] = src0_ptr[i0] + src1_ptr[i10]; @@ -1225,7 +1227,7 @@ kernel void kernel_bin_fuse_impl( } for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { - const int i10 = i0%args.ne10; + const int i10 = FC_CB ? 
i0%args.ne10 : i0;
 
         T res = src0_ptr[i0];
@@ -1261,6 +1263,7 @@ kernel void kernel_bin_fuse_impl(
 #undef FC_OP
 #undef FC_F
 #undef FC_RB
+#undef FC_CB
 }
 
 typedef decltype(kernel_bin_fuse_impl) kernel_bin_fuse_t;
@@ -2434,6 +2437,227 @@ kernel void kernel_rwkv_wkv7_f32(
     }
 }
 
+constant short FC_gated_delta_net_ne20 [[function_constant(FC_GATED_DELTA_NET + 0)]];
+constant short FC_gated_delta_net_ne30 [[function_constant(FC_GATED_DELTA_NET + 1)]];
+
+#if 1
+template <short NSG>
+kernel void kernel_gated_delta_net_impl(
+        constant ggml_metal_kargs_gated_delta_net & args,
+        device const char * q,
+        device const char * k,
+        device const char * v,
+        device const char * g,
+        device const char * b,
+        device const char * s,
+        device       char * dst,
+        uint3 tgpig[[threadgroup_position_in_grid]],
+        uint3 tpitg[[thread_position_in_threadgroup]],
+        uint3 ntg[[threads_per_threadgroup]]) {
+#define S_v FC_gated_delta_net_ne20
+#define G   FC_gated_delta_net_ne30
+
+    const uint tx = tpitg.x;
+    const uint ty = tpitg.y;
+
+    const uint i23 = tgpig.z; // B
+    const uint i21 = tgpig.y; // H
+    const uint i20 = tgpig.x*NSG + ty;
+
+    const uint i01 = i21 % args.ne01;
+    const uint i11 = i21 % args.ne11;
+
+    const float scale = 1.0f / sqrt((float)S_v);
+
+    device const float * s_ptr = (device const float *) (s) + (i23*args.ne21 + i21)*S_v*S_v + i20;
+
+    float ls[NSG];
+
+    FOR_UNROLL (short j = 0; j < NSG; j++) {
+        const short is = tx*NSG + j;
+        ls[j] = s_ptr[is*S_v];
+    }
+
+    device float * dst_attn = (device float *) (dst) + (i23*args.ne22*args.ne21 + i21)*S_v + i20;
+
+    device const float * q_ptr = (device const float *) (q + i23*args.nb03 + i01*args.nb01);
+    device const float * k_ptr = (device const float *) (k + i23*args.nb13 + i11*args.nb11);
+    device const float * v_ptr = (device const float *) (v + i23*args.nb23 + i21*args.nb21);
+
+    device const float * b_ptr = (device const float *) (b) + (i23*args.ne22*args.ne21 + i21);
+    device const float * g_ptr = (device const float *) (g) + (i23*args.ne22*args.ne21 + i21)*G;
+
+    for (short t = 0; t < args.ne22; t++) {
+        float s_k = 0.0f;
+
+        if (G == 1) {
+            const float g_exp = exp(g_ptr[0]);
+
+            FOR_UNROLL (short j = 0; j < NSG; j++) {
+                const short is = tx*NSG + j;
+                ls[j] *= g_exp;
+
+                s_k += ls[j]*k_ptr[is];
+            }
+        } else {
+            // KDA
+            FOR_UNROLL (short j = 0; j < NSG; j++) {
+                const short is = tx*NSG + j;
+                ls[j] *= exp(g_ptr[is]);
+
+                s_k += ls[j]*k_ptr[is];
+            }
+        }
+
+        s_k = simd_sum(s_k);
+
+        const float d = (v_ptr[i20] - s_k)*b_ptr[0];
+
+        float y = 0.0f;
+
+        FOR_UNROLL (short j = 0; j < NSG; j++) {
+            const short is = tx*NSG + j;
+            ls[j] += k_ptr[is]*d;
+
+            y += ls[j]*q_ptr[is];
+        }
+
+        y = simd_sum(y);
+
+        if (tx == 0) {
+            dst_attn[t*args.ne21*S_v] = y*scale;
+        }
+
+        q_ptr += args.ns02;
+        k_ptr += args.ns12;
+        v_ptr += args.ns22;
+
+        b_ptr += args.ne21;
+        g_ptr += args.ne21*G;
+    }
+
+    device float * dst_state = (device float *) (dst) + args.ne23*args.ne22*args.ne21*S_v + (i23*args.ne21 + i21)*S_v*S_v + i20;
+
+    FOR_UNROLL (short j = 0; j < NSG; j++) {
+        const short is = tx*NSG + j;
+        dst_state[is*S_v] = ls[j];
+    }
+
+#undef S_v
+#undef G
+}
+
+typedef decltype(kernel_gated_delta_net_impl<4>) kernel_gated_delta_net_t;
+
+template [[host_name("kernel_gated_delta_net_f32_1")]] kernel kernel_gated_delta_net_t kernel_gated_delta_net_impl<1>;
+template [[host_name("kernel_gated_delta_net_f32_2")]] kernel kernel_gated_delta_net_t kernel_gated_delta_net_impl<2>;
+template [[host_name("kernel_gated_delta_net_f32_4")]] kernel kernel_gated_delta_net_t kernel_gated_delta_net_impl<4>;
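For reference, a scalar sketch of the recurrence this kernel evaluates per head on the G == 1 path (not part of the patch; names and layout are illustrative): the state is first decayed by exp(g), the delta rule then corrects it toward v with strength beta, and the output is the q-projection of the updated state. State element (is, i20) lives at is*S_v + i20, matching the kernel's s_ptr[is*S_v] indexing.

    #include <math.h>

    // one time step of gated delta net for a single head (illustrative only)
    void gated_delta_step(float * S, const float * q, const float * k,
                          const float * v, float g, float beta, float * y, int S_v) {
        const float scale = 1.0f/sqrtf((float) S_v);
        const float g_exp = expf(g);
        for (int o = 0; o < S_v; o++) {     // output index (i20 in the kernel)
            float s_k = 0.0f;
            for (int i = 0; i < S_v; i++) { // decay the state, then project it onto k
                S[i*S_v + o] *= g_exp;
                s_k += S[i*S_v + o]*k[i];
            }
            const float d = (v[o] - s_k)*beta; // beta-weighted delta-rule correction
            float acc = 0.0f;
            for (int i = 0; i < S_v; i++) { // rank-1 update, then project onto q
                S[i*S_v + o] += k[i]*d;
                acc += S[i*S_v + o]*q[i];
            }
            y[o] = acc*scale;
        }
    }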
+
+#else
+// a simplified version of the above
+// no performance improvement, so keep the above version for now
+
+template <typename T, short NSG>
+kernel void kernel_gated_delta_net_impl(
+        constant ggml_metal_kargs_gated_delta_net & args,
+        device const char * q,
+        device const char * k,
+        device const char * v,
+        device const char * g,
+        device const char * b,
+        device const char * s,
+        device       char * dst,
+        uint3 tgpig[[threadgroup_position_in_grid]],
+        uint3 tpitg[[thread_position_in_threadgroup]],
+        uint3 ntg[[threads_per_threadgroup]]) {
+#define S_v FC_gated_delta_net_ne20
+#define G   FC_gated_delta_net_ne30
+
+    const uint tx = tpitg.x;
+    const uint ty = tpitg.y;
+
+    const uint i23 = tgpig.z; // B
+    const uint i21 = tgpig.y; // H
+    const uint i20 = tgpig.x*NSG + ty;
+
+    const uint i01 = i21 % args.ne01;
+    const uint i11 = i21 % args.ne11;
+
+    const float scale = 1.0f / sqrt((float)S_v);
+
+    device const float * s_ptr = (device const float *) (s) + (i23*args.ne21 + i21)*S_v*S_v + i20;
+
+    float lsf[NSG];
+
+    FOR_UNROLL (short j = 0; j < NSG; j++) {
+        const short is = tx*NSG + j;
+        lsf[j] = s_ptr[is*S_v];
+    }
+
+    thread T * ls = (thread T *) (lsf);
+
+    device float * dst_attn = (device float *) (dst) + (i23*args.ne22*args.ne21 + i21)*S_v + i20;
+
+    device const float * q_ptr = (device const float *) (q + i23*args.nb03 + i01*args.nb01);
+    device const float * k_ptr = (device const float *) (k + i23*args.nb13 + i11*args.nb11);
+    device const float * v_ptr = (device const float *) (v + i23*args.nb23 + i21*args.nb21);
+
+    device const float * b_ptr = (device const float *) (b) + (i23*args.ne22*args.ne21 + i21);
+    device const float * g_ptr = (device const float *) (g) + (i23*args.ne22*args.ne21 + i21)*G;
+
+    for (short t = 0; t < args.ne22; t++) {
+        device const T * qt_ptr = (device const T *) (q_ptr);
+        device const T * kt_ptr = (device const T *) (k_ptr);
+        device const T * gt_ptr = (device const T *) (g_ptr);
+
+        if (G == 1) {
+            *ls *= exp(g_ptr[0]);
+        } else {
+            // KDA
+            *ls *= exp(gt_ptr[tx]);
+        }
+
+        const float s_k = simd_sum(dot(*ls, kt_ptr[tx]));
+
+        const float d = (v_ptr[i20] - s_k)*b_ptr[0];
+
+        *ls += kt_ptr[tx]*d;
+
+        const float y = simd_sum(dot(*ls, qt_ptr[tx]));
+
+        if (tx == 0) {
+            *dst_attn = y*scale;
+        }
+
+        q_ptr += args.ns02;
+        k_ptr += args.ns12;
+        v_ptr += args.ns22;
+
+        b_ptr += args.ne21;
+        g_ptr += args.ne21*G;
+
+        dst_attn += args.ne21*S_v;
+    }
+
+    device float * dst_state = (device float *) (dst) + args.ne23*args.ne22*args.ne21*S_v + (i23*args.ne21 + i21)*S_v*S_v + i20;
+    device T * dstt_state = (device T *) (dst_state);
+
+    FOR_UNROLL (short j = 0; j < NSG; j++) {
+        const short is = tx*NSG + j;
+        dst_state[is*S_v] = lsf[j];
+    }
+
+#undef S_v
+#undef G
+}
+
+typedef decltype(kernel_gated_delta_net_impl<float4, 4>) kernel_gated_delta_net_t;
+
+template [[host_name("kernel_gated_delta_net_f32_1")]] kernel kernel_gated_delta_net_t kernel_gated_delta_net_impl<float,  1>;
+template [[host_name("kernel_gated_delta_net_f32_2")]] kernel kernel_gated_delta_net_t kernel_gated_delta_net_impl<float2, 2>;
+template [[host_name("kernel_gated_delta_net_f32_4")]] kernel kernel_gated_delta_net_t kernel_gated_delta_net_impl<float4, 4>;
+#endif
+
 constant short FC_solve_tri_nsg [[function_constant(FC_SOLVE_TRI + 0)]];
 constant short FC_solve_tri_n   [[function_constant(FC_SOLVE_TRI + 1)]];
 constant short FC_solve_tri_k   [[function_constant(FC_SOLVE_TRI + 2)]];
@@ -2782,7 +3006,7 @@ kernel void kernel_l2_norm_impl(
     sumf = shmem_f32[tiisg];
     sumf = simd_sum(sumf);
 
-    const float scale = 1.0f/sqrt(max(sumf, args.eps));
+    const float scale = 1.0f/max(sqrt(sumf), args.eps);
 
     for (int i00 = tpitg.x; i00 < args.ne00; i00 += ntg.x) {
         y[i00] = x[i00] * scale;
diff --git a/ggml/src/ggml-opencl/kernels/cumsum.cl b/ggml/src/ggml-opencl/kernels/cumsum.cl
new file mode 100644
index 000000000..edfb74b70
--- /dev/null
+++ b/ggml/src/ggml-opencl/kernels/cumsum.cl
@@ -0,0 +1,139 @@
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+
+#ifdef cl_intel_required_subgroup_size
+#pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable
+#define INTEL_GPU 1
+#define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16)))
+#define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32)))
+#elif defined(cl_qcom_reqd_sub_group_size)
+#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
+#define ADRENO_GPU 1
+#define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half")))
+#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full")))
+#endif
+
+// the max workgroup size is usually 1024, this covers the various subgroup sizes
+#define MAX_SUBGROUPS 128
+
+#ifdef INTEL_GPU
+REQD_SUBGROUP_SIZE_32
+#elif defined (ADRENO_GPU)
+REQD_SUBGROUP_SIZE_64
+#endif
+kernel void kernel_cumsum_blk(
+        global char * src0,
+        ulong offset0,
+        global char * tmp,
+        global char * dst,
+        ulong offsetd,
+        int ne00,
+        int ne01,
+        int ne02,
+        int ne03,
+        ulong nb00,
+        ulong nb01,
+        ulong nb02,
+        ulong nb03,
+        uint net0,
+        uint net1,
+        uint net2
+) {
+    src0 = src0 + offset0;
+    dst  = dst + offsetd;
+
+    const int i3 = get_group_id(2);
+    const int i2 = get_group_id(1);
+    const int i1 = get_group_id(0);
+
+    const int nth = get_local_size(0);
+    const int tid = get_local_id(0);
+
+    const uint sg_size = get_sub_group_size();
+    const uint sg_id   = get_sub_group_id();
+    const uint sg_lid  = get_sub_group_local_id();
+
+    const int ib  = i1 / ne01;
+    const int i00 = ib * nth;
+    const int i01 = i1 % ne01;
+    const int i02 = i2;
+    const int i03 = i3;
+
+    global const float * src0_row = (global const float *)(src0 + i03*nb03 + i02*nb02 + i01*nb01);
+    global float * tmp_row = (global float *)tmp + net0 * i01 + net0 * net1 * i02 + net0 * net1 * net2 * i03;
+    global float * dst_row = (global float *)dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
+
+    __local float partial[MAX_SUBGROUPS];
+
+    float v = 0.0f;
+    if (i00 + tid < ne00) {
+        v = src0_row[i00 + tid];
+    }
+
+    float s = sub_group_scan_inclusive_add(v);
+    if (sg_lid == sg_size - 1) {
+        partial[sg_id] = s;
+    }
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+    // NB: the subgroup size must be at least the number of subgroups
+    // assuming a max workgroup size of 1024, the subgroup size should be >= 32
+    if (sg_id == 0) {
+        float x = 0.0f;
+        if (sg_lid < get_num_sub_groups()) {
+            x = partial[sg_lid];
+        }
+        float ex = sub_group_scan_exclusive_add(x);
+        if (sg_lid < get_num_sub_groups()) {
+            partial[sg_lid] = ex;
+        }
+    }
+    barrier(CLK_LOCAL_MEM_FENCE);
+
+    s += partial[sg_id];
+
+    if (i00 + tid < ne00) {
+        dst_row[i00 + tid] = s;
+    }
+    if (ne00 > nth && tid == nth - 1) {
+        tmp_row[ib] = s;
+    }
+}
+
+kernel void kernel_cumsum_add(
+        global char * tmp,
+        global char * dst,
+        ulong offsetd,
+        int ne00,
+        int ne01,
+        int ne02,
+        int ne03,
+        uint nbt0,
+        uint nbt1,
+        uint nbt2,
+        uint nbt3
+) {
+    dst = dst + offsetd;
+
+    const int i3 = get_group_id(2);
+    const int i2 = get_group_id(1);
+    const int i1 = get_group_id(0);
+
+    const int nth = get_local_size(0);
+    const int tid = get_local_id(0);
+
+    const int ib = i1 / ne01;
+    if (ib == 0) {
+        return;
+    }
+    const int i00 = ib * nth;
+    const int i01 = i1 % ne01;
+    const int i02 = i2;
+    const int i03 = i3;
+
+    global float * tmp_row = (global float *)(tmp + nbt1 * i01 + nbt2 * i02 + nbt3 * i03);
+    global float * dst_row = (global float *)dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
+
+    if (i00 + tid < ne00) {
+        dst_row[i00 + tid] += tmp_row[ib - 1];
+    }
+}
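For reference, a host-side sketch of the decomposition these two kernels implement (not part of the patch; it assumes the per-block totals written to tmp are themselves prefix-summed between the two launches, e.g. by applying the same scheme recursively, so that tmp[ib - 1] holds the running total of all blocks before ib):

    #include <algorithm>
    #include <vector>

    // illustrative only: scan each block, scan the block totals, add the carries
    std::vector<float> cumsum_two_phase(const std::vector<float> & x, size_t block) {
        std::vector<float> dst(x.size());
        std::vector<float> tmp; // one total per block, like tmp_row

        // phase 1 (kernel_cumsum_blk): independent inclusive scans per block
        for (size_t b0 = 0; b0 < x.size(); b0 += block) {
            float s = 0.0f;
            for (size_t i = b0; i < std::min(x.size(), b0 + block); i++) {
                s += x[i];
                dst[i] = s;
            }
            tmp.push_back(s);
        }

        // in between the launches: turn the block totals into an inclusive scan
        for (size_t b = 1; b < tmp.size(); b++) {
            tmp[b] += tmp[b - 1];
        }

        // phase 2 (kernel_cumsum_add): block ib adds the total of blocks 0..ib-1
        for (size_t b0 = block, ib = 1; b0 < x.size(); b0 += block, ib++) {
            for (size_t i = b0; i < std::min(x.size(), b0 + block); i++) {
                dst[i] += tmp[ib - 1];
            }
        }
        return dst;
    }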
diff --git a/ggml/src/ggml-quants.c b/ggml/src/ggml-quants.c
index e8e25633f..cdaded865 100644
--- a/ggml/src/ggml-quants.c
+++ b/ggml/src/ggml-quants.c
@@ -304,6 +304,41 @@ void quantize_row_mxfp4_ref(const float * GGML_RESTRICT x, block_mxfp4 * GGML_RE
     }
 }
 
+void quantize_row_nvfp4_ref(const float * GGML_RESTRICT x, block_nvfp4 * GGML_RESTRICT y, int64_t k) {
+    static const int qk     = QK_NVFP4;
+    static const int qk_sub = QK_NVFP4_SUB;
+    static const int n_sub  = QK_NVFP4 / QK_NVFP4_SUB;
+
+    assert(k % qk == 0);
+
+    const int nb = k / qk;
+
+    for (int i = 0; i < nb; i++) {
+        for (int s = 0; s < n_sub; s++) {
+            const float * xb = x + i*qk + s*qk_sub;
+
+            float amax = 0.0f;
+            for (int j = 0; j < qk_sub; j++) {
+                if (amax < fabsf(xb[j])) {
+                    amax = fabsf(xb[j]);
+                }
+            }
+
+            // UE4M3 scale: amax / 6.0 maps the max E2M1 value (6.0) to amax
+            const uint8_t ue = ggml_fp32_to_ue4m3(amax / 6.0f);
+            y[i].d[s] = ue;
+            const float d = ggml_ue4m3_to_fp32(ue);
+
+            for (int j = 0; j < qk_sub/2; ++j) {
+                const uint8_t x0 = best_index_mxfp4(xb[0        + j], d);
+                const uint8_t x1 = best_index_mxfp4(xb[qk_sub/2 + j], d);
+
+                y[i].qs[s*(qk_sub/2) + j] = x0 | (x1 << 4);
+            }
+        }
+    }
+}
+
 void dequantize_row_q4_0(const block_q4_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) {
     static const int qk = QK4_0;
 
@@ -434,6 +469,31 @@ void dequantize_row_mxfp4(const block_mxfp4 * GGML_RESTRICT x, float * GGML_REST
     }
 }
 
+void dequantize_row_nvfp4(const block_nvfp4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) {
+    static const int qk     = QK_NVFP4;
+    static const int qk_sub = QK_NVFP4_SUB;
+    static const int n_sub  = QK_NVFP4 / QK_NVFP4_SUB;
+
+    assert(k % qk == 0);
+
+    const int nb = k / qk;
+
+    for (int i = 0; i < nb; i++) {
+        for (int s = 0; s < n_sub; s++) {
+            const float d = ggml_ue4m3_to_fp32(x[i].d[s]);
+            float * yb = y + i*qk + s*qk_sub;
+
+            for (int j = 0; j < qk_sub/2; ++j) {
+                const int8_t v0 = kvalues_mxfp4[x[i].qs[s*(qk_sub/2) + j] & 0x0F];
+                const int8_t v1 = kvalues_mxfp4[x[i].qs[s*(qk_sub/2) + j] >> 4];
+
+                yb[j + 0       ] = v0*d;
+                yb[j + qk_sub/2] = v1*d;
+            }
+        }
+    }
+}
+
 //
 // 2-6 bit quantization in super-blocks
 //
@@ -2098,6 +2158,12 @@ size_t quantize_mxfp4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst,
     return nrow * ggml_row_size(GGML_TYPE_MXFP4, n_per_row);
 }
 
+size_t quantize_nvfp4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
+    GGML_UNUSED(quant_weights);
+    quantize_row_nvfp4_ref(src, dst, (int64_t)nrow*n_per_row);
+    return nrow * ggml_row_size(GGML_TYPE_NVFP4, n_per_row);
+}
+
 // ====================== Ternary (de)-quantization (BitNet b1.58 and TriLMs)
 
 void quantize_row_tq1_0_ref(const float * GGML_RESTRICT x, block_tq1_0 * GGML_RESTRICT y, int64_t k) {
@@ -5244,6 +5310,12 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte
         {
             VALIDATE_ROW_DATA_E_E8M0_IMPL(block_mxfp4, data, nb);
         } break;
+    case GGML_TYPE_NVFP4:
+        {
+            // UE4M3 scales are uint8_t — all byte values are valid
+            GGML_UNUSED(data);
+            GGML_UNUSED(nb);
+        } break;
     case GGML_TYPE_Q2_K:
         {
             VALIDATE_ROW_DATA_DM_F16_IMPL(block_q2_K, data, nb, d, dmin);
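A small worked example of the sub-block scaling (not part of the patch; it only uses the helpers defined above): with amax = 3.0, the encoder stores ue4m3(3.0/6.0) = ue4m3(0.5). Since 0.5 = 2^-1 is exactly representable (biased exponent -1 + 7 = 6, mantissa 0), the stored byte is (6 << 3) | 0 = 0x30, the decoded scale d is exactly 0.5, and the largest E2M1 magnitude (6.0) dequantizes back to amax exactly:

    #include <assert.h>
    #include <stdint.h>

    // illustrative only: round-trip one NVFP4 sub-block scale
    void nvfp4_scale_example(void) {
        const float amax = 3.0f;                            // largest |x| in the sub-block
        const uint8_t ue = ggml_fp32_to_ue4m3(amax / 6.0f); // encode 0.5f
        assert(ue == 0x30);                                 // exponent 6, mantissa 0
        const float d = ggml_ue4m3_to_fp32(ue);             // decodes back to 0.5f
        assert(6.0f * d == amax);                           // max E2M1 value maps to amax
    }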
diff --git a/ggml/src/ggml-quants.h b/ggml/src/ggml-quants.h
index 3b688f31c..00604f75c 100644
--- a/ggml/src/ggml-quants.h
+++ b/ggml/src/ggml-quants.h
@@ -22,6 +22,7 @@ GGML_API void quantize_row_q8_0_ref(const float * GGML_RESTRICT x, block_q8_0 *
 GGML_API void quantize_row_q8_1_ref(const float * GGML_RESTRICT x, block_q8_1 * GGML_RESTRICT y, int64_t k);
 
 GGML_API void quantize_row_mxfp4_ref(const float * GGML_RESTRICT x, block_mxfp4 * GGML_RESTRICT y, int64_t k);
+GGML_API void quantize_row_nvfp4_ref(const float * GGML_RESTRICT x, block_nvfp4 * GGML_RESTRICT y, int64_t k);
 
 GGML_API void quantize_row_q2_K_ref(const float * GGML_RESTRICT x, block_q2_K * GGML_RESTRICT y, int64_t k);
 GGML_API void quantize_row_q3_K_ref(const float * GGML_RESTRICT x, block_q3_K * GGML_RESTRICT y, int64_t k);
@@ -48,6 +49,7 @@ GGML_API void dequantize_row_q8_0(const block_q8_0 * GGML_RESTRICT x, float * GG
 //GGML_API void dequantize_row_q8_1(const block_q8_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
 
 GGML_API void dequantize_row_mxfp4(const block_mxfp4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+GGML_API void dequantize_row_nvfp4(const block_nvfp4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
 
 GGML_API void dequantize_row_q2_K(const block_q2_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
 GGML_API void dequantize_row_q3_K(const block_q3_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
@@ -95,6 +97,7 @@ GGML_API size_t quantize_q5_1(const float * GGML_RESTRICT src, void * GGML_RESTR
 GGML_API size_t quantize_q8_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
 
 GGML_API size_t quantize_mxfp4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+GGML_API size_t quantize_nvfp4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
 
 GGML_API void iq2xs_init_impl(enum ggml_type type);
 GGML_API void iq2xs_free_impl(enum ggml_type type);
diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
index e0bd98227..a4bc14e3b 100644
--- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -39,6 +39,7 @@ DispatchLoaderDynamic & ggml_vk_default_dispatcher();
 #include 
 #include 
 #include 
+#include <deque>
 #include 
 #include 
 #include 
@@ -204,6 +205,11 @@ struct ggml_backend_vk_buffer_type_context {
 
 struct vk_queue;
 
+struct vk_command_buffer {
+    vk::CommandBuffer buf;
+    bool in_use = false;
+};
+
 // Stores command pool/buffers. There's an instance of this
 // for each (context,queue) pair and for each (device,queue) pair.
 struct vk_command_pool {
@@ -211,10 +217,16 @@ struct vk_command_pool {
     void destroy(vk::Device& device);
 
     vk::CommandPool pool;
-    uint32_t cmd_buffer_idx;
-    std::vector<vk::CommandBuffer> cmd_buffers;
+    // Using deque so the pointers to command buffers
+    // remain valid even if we add more
+    std::deque<vk_command_buffer> cmd_buffers;
 
     vk_queue *q;
+
+    size_t buffers_in_use() const {
+        return std::count_if(cmd_buffers.begin(), cmd_buffers.end(),
+            [](const auto& cb) { return cb.in_use; });
+    }
 };
 
 // Prevent simultaneous submissions to the same queue.
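A minimal illustration of the pointer-stability property relied on above (not part of the patch): unlike std::vector, std::deque::push_back never relocates existing elements, so a vk_command_buffer* handed out earlier stays valid while the pool grows.

    #include <deque>
    #include <vector>

    struct cb { bool in_use = false; };

    void pointer_stability_demo() {
        std::deque<cb> pool;
        pool.push_back({true});
        cb * p = &pool.front();      // pointer into the deque
        for (int i = 0; i < 1000; i++) {
            pool.push_back({});      // never relocates existing elements
        }
        p->in_use = false;           // still valid

        std::vector<cb> vec;
        vec.push_back({true});
        cb * q = &vec.front();
        for (int i = 0; i < 1000; i++) {
            vec.push_back({});       // may reallocate and move the elements
        }
        (void) q;                    // q may now dangle; dereferencing it is UB
    }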
@@ -829,6 +841,8 @@ struct vk_device_struct {
     vk_pipeline pipeline_pool2d_f32;
     vk_pipeline pipeline_rwkv_wkv6_f32;
     vk_pipeline pipeline_rwkv_wkv7_f32;
+    // [size_idx][kda] where size_idx: 0=d32, 1=d64, 2=d128
+    vk_pipeline pipeline_gated_delta_net[3][2];
     vk_pipeline pipeline_ssm_scan_f32_d128;
     vk_pipeline pipeline_ssm_scan_f32_d256;
     vk_pipeline pipeline_ssm_conv_f32;
@@ -894,10 +908,12 @@
 };
 
 void vk_command_pool::init(vk_device& device, vk_queue *q_) {
-    cmd_buffer_idx = 0;
+    cmd_buffers.clear();
     q = q_;
 
-    vk::CommandPoolCreateInfo command_pool_create_info(vk::CommandPoolCreateFlags(VK_COMMAND_POOL_CREATE_TRANSIENT_BIT), q->queue_family_index);
+    vk::CommandPoolCreateInfo command_pool_create_info(
+        vk::CommandPoolCreateFlags(VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT),
+        q->queue_family_index);
     pool = device->device.createCommandPool(command_pool_create_info);
 }
 
@@ -945,6 +961,7 @@ struct vk_subbuffer {
 
 struct vk_event {
     vk::Event event;
     vk::Fence fence;
+    vk_command_buffer* cmd_buffer = nullptr;
 };
 
 struct vk_semaphore {
@@ -953,7 +970,7 @@
 };
 
 struct vk_submission {
-    vk::CommandBuffer buffer;
+    vk_command_buffer* buffer = nullptr;
     std::vector<vk::Semaphore> wait_semaphores;
     std::vector<vk::Semaphore> signal_semaphores;
 };
@@ -1455,6 +1472,18 @@ struct vk_op_rwkv_wkv7_push_constants {
     uint32_t C;
     uint32_t H;
 };
+
+struct vk_op_gated_delta_net_push_constants {
+    uint32_t H;
+    uint32_t n_tokens;
+    uint32_t n_seqs;
+    uint32_t s_off;
+    uint32_t sq1, sq2, sq3;
+    uint32_t sv1, sv2, sv3;
+    uint32_t sb1, sb2, sb3;
+    uint32_t neq1, rq3;
+    float scale;
+};
+
 struct vk_op_ssm_scan_push_constants {
     uint32_t nb02, nb03, nb12, nb13;
     uint32_t nb21, nb22, nb31;
@@ -2299,25 +2328,15 @@ static void ggml_pipeline_allocate_descriptor_sets(ggml_backend_vk_context * ctx
     }
 }
 
-static vk::CommandBuffer ggml_vk_create_cmd_buffer(vk_device& device, vk_command_pool& p) {
+static vk_command_buffer* ggml_vk_create_cmd_buffer(vk_device& device, vk_command_pool& p) {
     VK_LOG_DEBUG("ggml_vk_create_cmd_buffer()");
-
-    if (p.cmd_buffers.size() > p.cmd_buffer_idx) {
-        // Reuse command buffer
-        return p.cmd_buffers[p.cmd_buffer_idx++];
-    }
-
     vk::CommandBufferAllocateInfo command_buffer_alloc_info(
         p.pool,
         vk::CommandBufferLevel::ePrimary,
         1);
     const std::vector<vk::CommandBuffer> cmd_buffers = device->device.allocateCommandBuffers(command_buffer_alloc_info);
-    auto buf = cmd_buffers.front();
-
-    p.cmd_buffers.push_back(buf);
-    p.cmd_buffer_idx++;
-
-    return buf;
+    p.cmd_buffers.push_back({ cmd_buffers.front(), true });
+    return &p.cmd_buffers[p.cmd_buffers.size()-1];
 }
 
 static void ggml_vk_submit(vk_context& ctx, vk::Fence fence) {
@@ -2384,7 +2403,7 @@ static void ggml_vk_submit(vk_context& ctx, vk::Fence fence) {
             tl_wait_semaphores[idx].data(),
             stage_flags[idx].data(),
             1,
-            &submission.buffer,
+            &submission.buffer->buf,
             (uint32_t) submission.signal_semaphores.size(),
             tl_signal_semaphores[idx].data(),
         };
@@ -2508,7 +2527,11 @@ static void ggml_vk_command_pool_cleanup(vk_device& device, vk_command_pool& p)
     // Requires command buffers to be done
     device->device.resetCommandPool(p.pool);
 
-    p.cmd_buffer_idx = 0;
+    // Don't clear the command buffers, just mark them as not in use.
+    // This allows us to reuse them.
+    for (auto& cmd_buffer : p.cmd_buffers) {
+        cmd_buffer.in_use = false;
+    }
 }
 
 static void ggml_vk_queue_command_pools_cleanup(vk_device& device) {
@@ -2517,10 +2540,10 @@ static void ggml_vk_queue_command_pools_cleanup(vk_device& device) {
 
     // Arbitrary frequency to cleanup/reuse command buffers
     static constexpr uint32_t cleanup_frequency = 10;
 
-    if (device->compute_queue.cmd_pool.cmd_buffer_idx >= cleanup_frequency) {
+    if (device->compute_queue.cmd_pool.buffers_in_use() >= cleanup_frequency) {
         ggml_vk_command_pool_cleanup(device, device->compute_queue.cmd_pool);
     }
-    if (device->transfer_queue.cmd_pool.cmd_buffer_idx >= cleanup_frequency) {
+    if (device->transfer_queue.cmd_pool.buffers_in_use() >= cleanup_frequency) {
         ggml_vk_command_pool_cleanup(device, device->transfer_queue.cmd_pool);
     }
 }
@@ -2768,7 +2791,7 @@ static void ggml_vk_sync_buffers(ggml_backend_vk_context* ctx, vk_context& subct
         ctx->prealloc_x_need_sync = ctx->prealloc_y_need_sync = ctx->prealloc_split_k_need_sync = false;
     }
 
-    subctx->s->buffer.pipelineBarrier(
+    subctx->s->buffer->buf.pipelineBarrier(
         subctx->p->q->stage_flags,
         subctx->p->q->stage_flags,
         {},
@@ -2784,7 +2807,7 @@ static void ggml_vk_sync_buffers(ggml_backend_vk_context* ctx, vk_context& subct
 
 static void ggml_vk_set_event(vk_context& ctx, vk::Event& event) {
     VK_LOG_DEBUG("ggml_vk_set_event()");
-    ctx->s->buffer.setEvent(
+    ctx->s->buffer->buf.setEvent(
         event,
         ctx->p->q->stage_flags
     );
@@ -2796,7 +2819,7 @@ static void ggml_vk_wait_events(vk_context& ctx, std::vector<vk::Event>&& events
         return;
     }
 
-    ctx->s->buffer.waitEvents(
+    ctx->s->buffer->buf.waitEvents(
        events,
        ctx->p->q->stage_flags,
        ctx->p->q->stage_flags,
@@ -4575,6 +4598,23 @@ static void ggml_vk_load_shaders(vk_device& device) {
 
     ggml_vk_create_pipeline(device, device->pipeline_rwkv_wkv7_f32, "rwkv_wkv7_f32", rwkv_wkv7_f32_len, rwkv_wkv7_f32_data, "main", 8, sizeof(vk_op_rwkv_wkv7_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
 
+    {
+        const uint32_t gdn_sizes[] = {32, 64, 128};
+        const char * gdn_names[][2] = {
+            {"gated_delta_net_f32_d32",  "gated_delta_net_f32_d32_kda"},
+            {"gated_delta_net_f32_d64",  "gated_delta_net_f32_d64_kda"},
+            {"gated_delta_net_f32_d128", "gated_delta_net_f32_d128_kda"},
+        };
+        for (uint32_t si = 0; si < 3; si++) {
+            for (uint32_t kda = 0; kda < 2; kda++) {
+                ggml_vk_create_pipeline(device, device->pipeline_gated_delta_net[si][kda],
+                    gdn_names[si][kda], gated_delta_net_f32_len, gated_delta_net_f32_data,
+                    "main", 7, sizeof(vk_op_gated_delta_net_push_constants),
+                    {1, 1, 1}, {gdn_sizes[si], kda}, 1);
+            }
+        }
+    }
+
     if (device->subgroup_arithmetic && device->subgroup_require_full_support) {
         ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d128, "ssm_scan_128_f32", ssm_scan_subgroup_f32_len, ssm_scan_subgroup_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {128, device->subgroup_size}, 1, true, true);
         ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d256, "ssm_scan_256_f32", ssm_scan_subgroup_f32_len, ssm_scan_subgroup_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {256, device->subgroup_size}, 1, true, true);
@@ -4583,7 +4623,7 @@ static void ggml_vk_load_shaders(vk_device& device) {
         ggml_vk_create_pipeline(device, device->pipeline_ssm_scan_f32_d256, "ssm_scan_256_f32", ssm_scan_f32_len, ssm_scan_f32_data, "main", 8, sizeof(vk_op_ssm_scan_push_constants), {1, 1, 1}, {256, device->subgroup_size, 16}, 1, true, true);
     }
 
-    ggml_vk_create_pipeline(device,
device->pipeline_ssm_conv_f32, "ssm_conv_f32", ssm_conv_f32_len, ssm_conv_f32_data, "main", 3, sizeof(vk_op_ssm_conv_push_constants), {32, 1, 1}, {32}, 1); + ggml_vk_create_pipeline(device, device->pipeline_ssm_conv_f32, "ssm_conv_f32", ssm_conv_f32_len, ssm_conv_f32_data, "main", 3, sizeof(vk_op_ssm_conv_push_constants), {32, 16, 1}, {32, 16}, 1); ggml_vk_create_pipeline(device, device->pipeline_opt_step_adamw_f32, "opt_step_adamw_f32", opt_step_adamw_f32_len, opt_step_adamw_f32_data, "main", 5, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); @@ -6378,13 +6418,24 @@ static vk_subbuffer ggml_vk_tensor_subbuffer( return vk_subbuffer{buffer, offset, size}; } +// Get a command buffer from pool. Create a new one if no reusable buffer is available +static vk_command_buffer* ggml_vk_get_or_create_cmd_buffer(vk_device& device, vk_command_pool& pool) { + for (auto& cmd_buffer : pool.cmd_buffers) { + if (!cmd_buffer.in_use) { + cmd_buffer.in_use = true; + return &cmd_buffer; + } + } + return ggml_vk_create_cmd_buffer(device, pool); +} + static vk_submission ggml_vk_begin_submission(vk_device& device, vk_command_pool& p, bool one_time = true) { vk_submission s; - s.buffer = ggml_vk_create_cmd_buffer(device, p); + s.buffer = ggml_vk_get_or_create_cmd_buffer(device, p); if (one_time) { - s.buffer.begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit }); + s.buffer->buf.begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit }); } else { - s.buffer.begin({ vk::CommandBufferUsageFlags{} }); + s.buffer->buf.begin({ vk::CommandBufferUsageFlags{} }); } return s; @@ -6445,18 +6496,18 @@ static void ggml_vk_dispatch_pipeline(ggml_backend_vk_context* ctx, vk_context& vk::WriteDescriptorSet write_descriptor_set{ descriptor_set, 0, 0, pipeline->parameter_count, vk::DescriptorType::eStorageBuffer, nullptr, descriptor_buffer_infos.begin() }; ctx->device->device.updateDescriptorSets({ write_descriptor_set }, {}); - subctx->s->buffer.pushConstants(pipeline->layout, vk::ShaderStageFlagBits::eCompute, 0, push_constant_size(push_constants), push_constant_data(push_constants)); - subctx->s->buffer.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline->pipeline); - subctx->s->buffer.bindDescriptorSets(vk::PipelineBindPoint::eCompute, + subctx->s->buffer->buf.pushConstants(pipeline->layout, vk::ShaderStageFlagBits::eCompute, 0, push_constant_size(push_constants), push_constant_data(push_constants)); + subctx->s->buffer->buf.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline->pipeline); + subctx->s->buffer->buf.bindDescriptorSets(vk::PipelineBindPoint::eCompute, pipeline->layout, 0, { descriptor_set }, {}); - subctx->s->buffer.dispatch(wg0, wg1, wg2); + subctx->s->buffer->buf.dispatch(wg0, wg1, wg2); } static void ggml_vk_end_submission(vk_submission& s, std::vector wait_semaphores, std::vector signal_semaphores) { - s.buffer.end(); + s.buffer->buf.end(); s.wait_semaphores = std::move(wait_semaphores); s.signal_semaphores = std::move(signal_semaphores); @@ -6468,7 +6519,7 @@ static void ggml_vk_ctx_end(vk_context& ctx) { return; } - ctx->s->buffer.end(); + ctx->s->buffer->buf.end(); ctx->s = nullptr; } @@ -6622,7 +6673,7 @@ static void ggml_vk_buffer_write_nc_async(ggml_backend_vk_context * ctx, vk_cont } ggml_vk_sync_buffers(ctx, subctx); - subctx->s->buffer.copyBuffer(buf->buffer, dst->buffer, slices); + subctx->s->buffer->buf.copyBuffer(buf->buffer, dst->buffer, slices); return; } @@ -6637,7 +6688,7 @@ static void ggml_vk_buffer_write_nc_async(ggml_backend_vk_context * ctx, vk_cont VkBufferCopy buf_copy{ 0, 
offset, copy_size }; ggml_vk_sync_buffers(ctx, subctx); - vkCmdCopyBuffer(subctx->s->buffer, (VkBuffer)staging->buffer, (VkBuffer)dst->buffer, 1, &buf_copy); + vkCmdCopyBuffer(subctx->s->buffer->buf, (VkBuffer)staging->buffer, (VkBuffer)dst->buffer, 1, &buf_copy); for (uint64_t i3 = 0; i3 < ne3; i3++) { for (uint64_t i2 = 0; i2 < ne2; i2++) { @@ -6686,7 +6737,7 @@ static bool ggml_vk_buffer_write_2d_async(vk_context subctx, vk_buffer& dst, siz } ggml_vk_sync_buffers(nullptr, subctx); - subctx->s->buffer.copyBuffer(buf->buffer, dst->buffer, slices); + subctx->s->buffer->buf.copyBuffer(buf->buffer, dst->buffer, slices); return true; } VK_LOG_DEBUG("STAGING"); @@ -6708,7 +6759,7 @@ static bool ggml_vk_buffer_write_2d_async(vk_context subctx, vk_buffer& dst, siz copy_size}; ggml_vk_sync_buffers(nullptr, subctx); - vkCmdCopyBuffer(subctx->s->buffer, (VkBuffer)staging_buffer->buffer, (VkBuffer)dst->buffer, 1, &buf_copy); + vkCmdCopyBuffer(subctx->s->buffer->buf, (VkBuffer)staging_buffer->buffer, (VkBuffer)dst->buffer, 1, &buf_copy); if (width == spitch) { deferred_memcpy((uint8_t *)staging_buffer->ptr, src, width * height, &subctx->in_memcpys); @@ -6794,7 +6845,7 @@ static bool ggml_vk_buffer_read_2d_async(vk_context subctx, vk_buffer& src, size if (buf != nullptr) { // Memory is pinned, use as staging buffer ggml_vk_sync_buffers(nullptr, subctx); - subctx->s->buffer.copyBuffer(src->buffer, buf->buffer, slices); + subctx->s->buffer->buf.copyBuffer(src->buffer, buf->buffer, slices); return true; } @@ -6812,7 +6863,7 @@ static bool ggml_vk_buffer_read_2d_async(vk_context subctx, vk_buffer& src, size vk_buffer& staging_buffer = src->device->sync_staging; ggml_vk_sync_buffers(nullptr, subctx); - subctx->s->buffer.copyBuffer(src->buffer, staging_buffer->buffer, slices); + subctx->s->buffer->buf.copyBuffer(src->buffer, staging_buffer->buffer, slices); deferred_memcpy(dst, staging_buffer->ptr, copy_size, &subctx->out_memcpys); return true; @@ -6859,7 +6910,7 @@ static void ggml_vk_buffer_copy_async(vk_context& ctx, vk_buffer& dst, size_t ds VkBufferCopy bc{ src_offset, dst_offset, size }; - vkCmdCopyBuffer(ctx->s->buffer, (VkBuffer)src->buffer, (VkBuffer)dst->buffer, 1, &bc); + vkCmdCopyBuffer(ctx->s->buffer->buf, (VkBuffer)src->buffer, (VkBuffer)dst->buffer, 1, &bc); } static void ggml_vk_buffer_copy(vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) { @@ -6897,7 +6948,7 @@ static void ggml_vk_buffer_memset_async(vk_context& ctx, vk_buffer& dst, size_t } // Fall back to GPU fillBuffer for non-UMA or non-host-visible buffers - ctx->s->buffer.fillBuffer(dst->buffer, offset, size, c); + ctx->s->buffer->buf.fillBuffer(dst->buffer, offset, size, c); } static void ggml_vk_buffer_memset(vk_buffer& dst, size_t offset, uint32_t c, size_t size) { @@ -6912,7 +6963,7 @@ static void ggml_vk_buffer_memset(vk_buffer& dst, size_t offset, uint32_t c, siz std::lock_guard guard(dst->device->mutex); vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue.cmd_pool); ggml_vk_ctx_begin(dst->device, subctx); - subctx->s->buffer.fillBuffer(dst->buffer, offset, size, c); + subctx->s->buffer->buf.fillBuffer(dst->buffer, offset, size, c); ggml_vk_ctx_end(subctx); ggml_vk_submit(subctx, dst->device->fence); @@ -8858,7 +8909,7 @@ static void ggml_vk_flash_attn(ggml_backend_vk_context * ctx, vk_context& subctx } // Only use mask opt when the mask is fairly large. This hasn't been tuned extensively. 
- bool use_mask_opt = mask && nem1 >= 32 && nem0 * nem1 > 32768; + bool use_mask_opt = mask && nem1 >= 32 && nem0 * nem1 > 32768 && nem0 >= tuning_params.block_cols * 16; vk_fa_pipeline_state fa_pipeline_state = get_fa_pipeline_state(ctx->device, tuning_params, HSK, HSV, aligned, f32acc, mask != nullptr, use_mask_opt, logit_softcap != 0); @@ -9516,6 +9567,20 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const return ctx->device->pipeline_rwkv_wkv7_f32; } return nullptr; + case GGML_OP_GATED_DELTA_NET: + if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { + const uint32_t S_v = dst->src[2]->ne[0]; + const uint32_t kda = (dst->src[3]->ne[0] == (int64_t)S_v) ? 1 : 0; + uint32_t si; + switch (S_v) { + case 32: si = 0; break; + case 64: si = 1; break; + case 128: si = 2; break; + default: return nullptr; + } + return ctx->device->pipeline_gated_delta_net[si][kda]; + } + return nullptr; case GGML_OP_SSM_SCAN: if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { const uint32_t d_state = src0->ne[0]; @@ -10346,6 +10411,59 @@ static void ggml_vk_rwkv_wkv7(ggml_backend_vk_context * ctx, vk_context& subctx, ); } +static void ggml_vk_gated_delta_net(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst) { + const ggml_tensor * src_q = dst->src[0]; + const ggml_tensor * src_v = dst->src[2]; + const ggml_tensor * src_beta = dst->src[4]; + + GGML_ASSERT(dst->buffer != nullptr); + + const uint32_t S_v = (uint32_t)src_v->ne[0]; + const uint32_t H = (uint32_t)src_v->ne[1]; + const uint32_t n_tokens = (uint32_t)src_v->ne[2]; + const uint32_t n_seqs = (uint32_t)src_v->ne[3]; + + const uint32_t s_off = S_v * H * n_tokens * n_seqs; + + vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, dst->src[0], dst->src[1], dst->src[2], dst, dst->op); + GGML_ASSERT(pipeline != nullptr); + + ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1); + + vk_subbuffer dst_buf = ggml_vk_tensor_subbuffer(ctx, dst); + vk_subbuffer src_buf[6] = {}; + for (int i = 0; i < 6; i++) { + src_buf[i] = ggml_vk_tensor_subbuffer(ctx, dst->src[i]); + } + + const uint32_t sq1 = (uint32_t)(src_q->nb[1] / sizeof(float)); + const uint32_t sq2 = (uint32_t)(src_q->nb[2] / sizeof(float)); + const uint32_t sq3 = (uint32_t)(src_q->nb[3] / sizeof(float)); + const uint32_t sv1 = (uint32_t)(src_v->nb[1] / sizeof(float)); + const uint32_t sv2 = (uint32_t)(src_v->nb[2] / sizeof(float)); + const uint32_t sv3 = (uint32_t)(src_v->nb[3] / sizeof(float)); + const uint32_t sb1 = (uint32_t)(src_beta->nb[1] / sizeof(float)); + const uint32_t sb2 = (uint32_t)(src_beta->nb[2] / sizeof(float)); + const uint32_t sb3 = (uint32_t)(src_beta->nb[3] / sizeof(float)); + + const uint32_t neq1 = (uint32_t)src_q->ne[1]; + const uint32_t rq3 = (uint32_t)(src_v->ne[3] / src_q->ne[3]); + + const float scale = 1.0f / sqrtf((float)S_v); + const vk_op_gated_delta_net_push_constants pc = { + H, n_tokens, n_seqs, s_off, + sq1, sq2, sq3, + sv1, sv2, sv3, + sb1, sb2, sb3, + neq1, rq3, + scale + }; + + ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, + {src_buf[0], src_buf[1], src_buf[2], src_buf[3], src_buf[4], src_buf[5], dst_buf}, + pc, { H, n_seqs, 1u }); +} + static void ggml_vk_ssm_scan(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; @@ -12720,7 +12838,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr if (vk_perf_logger_enabled && vk_perf_logger_concurrent) { 
ctx->query_node_idx[ctx->query_idx] = node_idx; - compute_ctx->s->buffer.writeTimestamp(vk::PipelineStageFlagBits::eAllCommands, ctx->query_pool, ctx->query_idx++); + compute_ctx->s->buffer->buf.writeTimestamp(vk::PipelineStageFlagBits::eAllCommands, ctx->query_pool, ctx->query_idx++); } } // Add all fused nodes to the unsynchronized lists. @@ -13062,6 +13180,11 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr break; + case GGML_OP_GATED_DELTA_NET: + ggml_vk_gated_delta_net(ctx, compute_ctx, node); + + break; + case GGML_OP_SSM_SCAN: ggml_vk_ssm_scan(ctx, compute_ctx, node); @@ -13559,7 +13682,7 @@ static void ggml_backend_vk_set_tensor_async(ggml_backend_t backend, ggml_tensor buffer_cpy.dstOffset = dst_offset; buffer_cpy.size = size; - cpy_ctx->s->buffer.copyBuffer(ctx->sync_staging->buffer, buf->buffer, { buffer_cpy }); + cpy_ctx->s->buffer->buf.copyBuffer(ctx->sync_staging->buffer, buf->buffer, { buffer_cpy }); deferred_memcpy(ctx->sync_staging->ptr, data, size, &cpy_ctx->in_memcpys); ggml_vk_synchronize(ctx); } @@ -13593,7 +13716,7 @@ static void ggml_backend_vk_get_tensor_async(ggml_backend_t backend, const ggml_ buffer_cpy.dstOffset = 0; buffer_cpy.size = size; - compute_ctx->s->buffer.copyBuffer(buf->buffer, ctx->sync_staging->buffer, { buffer_cpy }); + compute_ctx->s->buffer->buf.copyBuffer(buf->buffer, ctx->sync_staging->buffer, { buffer_cpy }); deferred_memcpy(data, ctx->sync_staging->ptr, size, &compute_ctx->out_memcpys); ggml_vk_synchronize(ctx); } @@ -13671,8 +13794,12 @@ static void ggml_vk_synchronize(ggml_backend_vk_context * ctx) { } vk_context compute_ctx; + vk_command_buffer* cmd_buf = nullptr; if (do_transfer) { compute_ctx = ctx->compute_ctx.lock(); + if (compute_ctx->s) { + cmd_buf = compute_ctx->s->buffer; + } ggml_vk_ctx_end(compute_ctx); @@ -13706,6 +13833,9 @@ static void ggml_vk_synchronize(ggml_backend_vk_context * ctx) { } ggml_vk_wait_for_fence(ctx); ctx->submit_pending = false; + if (cmd_buf) { + cmd_buf->in_use = false; + } } if (do_transfer) { @@ -14195,7 +14325,7 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg GGML_ASSERT(ctx->compute_ctx.expired()); compute_ctx = ggml_vk_get_compute_ctx(ctx); ctx->query_idx = 0; - compute_ctx->s->buffer.writeTimestamp(vk::PipelineStageFlagBits::eAllCommands, ctx->query_pool, ctx->query_idx++); + compute_ctx->s->buffer->buf.writeTimestamp(vk::PipelineStageFlagBits::eAllCommands, ctx->query_pool, ctx->query_idx++); } ctx->prealloc_y_last_pipeline_used = nullptr; @@ -14431,7 +14561,7 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg // track a single node/fusion for the current query ctx->query_nodes[ctx->query_idx] = cgraph->nodes[i]; ctx->query_fusion_names[ctx->query_idx] = fusion_string; - compute_ctx->s->buffer.writeTimestamp(vk::PipelineStageFlagBits::eAllCommands, ctx->query_pool, ctx->query_idx++); + compute_ctx->s->buffer->buf.writeTimestamp(vk::PipelineStageFlagBits::eAllCommands, ctx->query_pool, ctx->query_idx++); } else { // track a fusion string and number of fused ops for the current node_idx ctx->query_fusion_names[i] = fusion_string; @@ -14764,6 +14894,7 @@ static void ggml_backend_vk_event_record(ggml_backend_t backend, ggml_backend_ev ggml_vk_submit_transfer_ctx(ctx); vk_context compute_ctx = ggml_vk_get_compute_ctx(ctx); + auto* cmd_buf = compute_ctx->s->buffer; // retrieve pointer before it gets reset // the backend interface doesn't have an explicit reset, so reset it here // before we record the 
command to set it @@ -14776,6 +14907,7 @@ static void ggml_backend_vk_event_record(ggml_backend_t backend, ggml_backend_ev ggml_vk_submit(compute_ctx, {vkev->fence}); ctx->submit_pending = true; + vkev->cmd_buffer = cmd_buf; ctx->compute_ctx.reset(); } @@ -15464,6 +15596,19 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm case GGML_OP_RWKV_WKV6: case GGML_OP_RWKV_WKV7: return true; // all inputs are contiguous, see ggml.c + case GGML_OP_GATED_DELTA_NET: + { + const uint32_t S_v = op->src[2]->ne[0]; + if (S_v != 32 && S_v != 64 && S_v != 128) { + return false; + } + for (int i = 0; i < 6; i++) { + if (op->src[i] == nullptr || op->src[i]->type != GGML_TYPE_F32) { + return false; + } + } + return op->type == GGML_TYPE_F32; + } case GGML_OP_SSM_SCAN: { for (int i = 0; i < 6; i++) { @@ -15595,6 +15740,10 @@ static void ggml_backend_vk_device_event_synchronize(ggml_backend_dev_t dev, ggm vk_event *vkev = (vk_event *)event->context; VK_CHECK(device->device.waitForFences({ vkev->fence }, true, UINT64_MAX), "event_synchronize"); + // Finished using current command buffer so we flag for reuse + if (vkev->cmd_buffer) { + vkev->cmd_buffer->in_use = false; + } } static vk_buffer ggml_vk_buffer_from_host_ptr(vk_device & device, void * ptr, size_t size) { @@ -16066,7 +16215,7 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph * tensor_clone = ggml_arange(ggml_ctx, start, stop, step); } else if (tensor->op == GGML_OP_FILL) { const float value = ggml_get_op_params_f32(tensor, 0); - tensor_clone = ggml_fill(ggml_ctx, tensor_clone, value); + tensor_clone = ggml_fill(ggml_ctx, src_clone[0], value); } else if (tensor->op == GGML_OP_SQR) { tensor_clone = ggml_sqr(ggml_ctx, src_clone[0]); } else if (tensor->op == GGML_OP_SQRT) { @@ -16337,6 +16486,9 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph * } else if (tensor->op == GGML_OP_RWKV_WKV7) { tensor_clone = ggml_rwkv_wkv7(ggml_ctx, src_clone[0], src_clone[1], src_clone[2], src_clone[3], src_clone[4], src_clone[5], src_clone[6]); + } else if (tensor->op == GGML_OP_GATED_DELTA_NET) { + tensor_clone = ggml_gated_delta_net(ggml_ctx, src_clone[0], src_clone[1], + src_clone[2], src_clone[3], src_clone[4], src_clone[5]); } else if (tensor->op == GGML_OP_OPT_STEP_ADAMW) { src_clone[0]->flags = tensor->src[0]->flags; tensor_clone = ggml_opt_step_adamw(ggml_ctx, src_clone[0], src_clone[1], diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_mask_opt.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_mask_opt.comp index 8c92c1adc..0e4177080 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_mask_opt.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_mask_opt.comp @@ -33,6 +33,61 @@ layout (push_constant) uniform parameter { shared float minsh[NUM_SUBGROUPS]; shared float maxsh[NUM_SUBGROUPS]; +float FLT_MAX_OVER_2 = uintBitsToFloat(0x7EFFFFFF); + +void loadvec4(inout uint result, const uint i0, const uint i1, const uint i2, const uint i3, const bool need_bounds_check) { + const uint tid = gl_LocalInvocationIndex; + + [[unroll]] for (uint block_x = 0; block_x < 16; ++block_x) { + float min_v = FLT_MAX_OVER_2; + float max_v = -FLT_MAX_OVER_2; + [[unroll]] for (uint i = 0; i < Br * Bc / 4; i += BLOCK_SIZE) { + uint j0 = (i + tid) % (Bc / 4); + uint j1 = (i + tid) / (Bc / 4); + + j0 *= 4; + j0 += (i0 * 16 + block_x) * Bc; + j1 += i1 * Br; + + if (!need_bounds_check || j0 + 3 < nem0) { + vec4 f = vec4(data_av4[(j0 + j1 * nbm1 + i2 * nbm2 + i3 * nbm3) / 
4]); + [[unroll]] for (int c = 0; c < 4; ++c) { + min_v = min(min_v, f[c]); + max_v = max(max_v, f[c]); + } + } else { + [[unroll]] for (int c = 0; c < 4; ++c) { + if (j0 + c < nem0) { + float f = float(data_a[j0 + j1 * nbm1 + i2 * nbm2 + i3 * nbm3]); + min_v = min(min_v, f); + max_v = max(max_v, f); + } + } + } + } + min_v = subgroupMin(min_v); + max_v = subgroupMax(max_v); + if (gl_SubgroupInvocationID == 0) { + minsh[gl_SubgroupID] = min_v; + maxsh[gl_SubgroupID] = max_v; + } + barrier(); + if (tid == 0) { + [[unroll]] for (uint i = 0; i < NUM_SUBGROUPS; ++i) { + min_v = min(min_v, minsh[i]); + max_v = max(max_v, maxsh[i]); + } + if (max_v <= -FLT_MAX_OVER_2) { + result |= 1 << (2*block_x); + } + if (min_v == 0.0f && max_v == 0.0f) { + result |= 2 << (2*block_x); + } + } + barrier(); + } +} + // For each Br x Bc block of the mask (input) buffer, read all values and check // if it's all -inf or all zero. Write out a two-bit code indicating which it is // (or zero for neither). Each workgroup processes 16 tiles and writes out a @@ -48,50 +103,15 @@ void main() { const uint i2 = gl_WorkGroupID.z % nem2; const uint i3 = gl_WorkGroupID.z / nem2; - float FLT_MAX_OVER_2 = uintBitsToFloat(0x7EFFFFFF); - uint result = 0; // Fast path for fully in-bounds blocks where we can do f16vec4 loads if ((nem0 % Bc) == 0 && (nem1 % Br) == 0 && ((Br * Bc) % (BLOCK_SIZE * 4)) == 0) { - [[unroll]] for (uint block_x = 0; block_x < 16; ++block_x) { - float min_v = FLT_MAX_OVER_2; - float max_v = -FLT_MAX_OVER_2; - [[unroll]] for (uint i = 0; i < Br * Bc / 4; i += BLOCK_SIZE) { - uint j0 = (i + tid) % (Bc / 4); - uint j1 = (i + tid) / (Bc / 4); - - j0 *= 4; - j0 += (i0 * 16 + block_x) * Bc; - j1 += i1 * Br; - - vec4 f = vec4(data_av4[(j0 + j1 * nbm1 + i2 * nbm2 + i3 * nbm3) / 4]); - [[unroll]] for (int c = 0; c < 4; ++c) { - min_v = min(min_v, f[c]); - max_v = max(max_v, f[c]); - } - } - min_v = subgroupMin(min_v); - max_v = subgroupMax(max_v); - if (gl_SubgroupInvocationID == 0) { - minsh[gl_SubgroupID] = min_v; - maxsh[gl_SubgroupID] = max_v; - } - barrier(); - if (tid == 0) { - [[unroll]] for (uint i = 0; i < NUM_SUBGROUPS; ++i) { - min_v = min(min_v, minsh[i]); - max_v = max(max_v, maxsh[i]); - } - if (max_v <= -FLT_MAX_OVER_2) { - result |= 1 << (2*block_x); - } - if (min_v == 0.0f && max_v == 0.0f) { - result |= 2 << (2*block_x); - } - } - barrier(); + if ((i0 + 1) * 16 * Bc <= nem0) { + loadvec4(result, i0, i1, i2, i3, false); + } else { + loadvec4(result, i0, i1, i2, i3, true); } } else { [[unroll]] for (uint block_x = 0; block_x < 16; ++block_x) { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/gated_delta_net.comp b/ggml/src/ggml-vulkan/vulkan-shaders/gated_delta_net.comp new file mode 100644 index 000000000..1fdf889e8 --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/gated_delta_net.comp @@ -0,0 +1,128 @@ +#version 450 + +#extension GL_EXT_control_flow_attributes : require + +layout(constant_id = 0) const uint S_V = 128; +layout(constant_id = 1) const uint KDA = 0; + +layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; + +layout(push_constant) uniform Parameters { + uint H; + uint n_tokens; + uint n_seqs; + uint s_off; + uint sq1, sq2, sq3; + uint sv1, sv2, sv3; + uint sb1, sb2, sb3; + uint neq1, rq3; + float scale; +}; + +layout(binding = 0) readonly buffer QBuf { FLOAT_TYPE data_q[]; }; +layout(binding = 1) readonly buffer KBuf { FLOAT_TYPE data_k[]; }; +layout(binding = 2) readonly buffer VBuf { FLOAT_TYPE data_v[]; }; +layout(binding = 3) readonly buffer GBuf { FLOAT_TYPE 
data_g[]; }; +layout(binding = 4) readonly buffer BetaBuf { FLOAT_TYPE data_beta[]; }; +layout(binding = 5) readonly buffer StateBuf { FLOAT_TYPE data_state[]; }; +layout(binding = 6) buffer DstBuf { FLOAT_TYPE data_dst[]; }; + +shared FLOAT_TYPE s_k[S_V]; +shared FLOAT_TYPE s_q[S_V]; +shared FLOAT_TYPE s_g[S_V]; // KDA only: cached exp(g[i]) + +void main() { + const uint head_id = gl_WorkGroupID.x; + const uint seq_id = gl_WorkGroupID.y; + const uint col = gl_LocalInvocationID.x; + + const uint iq1 = head_id % neq1; + const uint iq3 = seq_id / rq3; + + const uint state_size = S_V * S_V; + const uint state_base = (seq_id * H + head_id) * state_size; + + FLOAT_TYPE state[S_V]; + [[unroll]] for (uint i = 0; i < S_V; i++) { + state[i] = FLOAT_TYPE(data_state[state_base + i * S_V + col]); + } + + uint attn_off = (seq_id * n_tokens * H + head_id) * S_V; + + for (uint t = 0; t < n_tokens; t++) { + const uint q_off = iq3 * sq3 + t * sq2 + iq1 * sq1; + const uint k_off = q_off; + const uint v_off = seq_id * sv3 + t * sv2 + head_id * sv1; + + s_q[col] = FLOAT_TYPE(data_q[q_off + col]); + s_k[col] = FLOAT_TYPE(data_k[k_off + col]); + + const uint gb_off = seq_id * sb3 + t * sb2 + head_id * sb1; + + if (KDA != 0) { + const uint g_base = gb_off * S_V; + s_g[col] = exp(FLOAT_TYPE(data_g[g_base + col])); + } + + barrier(); + + const FLOAT_TYPE v_val = FLOAT_TYPE(data_v[v_off + col]); + const FLOAT_TYPE beta_val = FLOAT_TYPE(data_beta[gb_off]); + + if (KDA == 0) { + const FLOAT_TYPE g_val = exp(FLOAT_TYPE(data_g[gb_off])); + + FLOAT_TYPE kv_col = 0.0; + [[unroll]] for (uint i = 0; i < S_V; i += 4) { + kv_col += dot( + vec4(state[i], state[i+1], state[i+2], state[i+3]), + vec4(s_k[i], s_k[i+1], s_k[i+2], s_k[i+3]) + ); + } + + FLOAT_TYPE delta_col = (v_val - g_val * kv_col) * beta_val; + + FLOAT_TYPE attn_col = 0.0; + [[unroll]] for (uint i = 0; i < S_V; i += 4) { + vec4 sv = vec4(state[i], state[i+1], state[i+2], state[i+3]); + vec4 kv = vec4(s_k[i], s_k[i+1], s_k[i+2], s_k[i+3]); + sv = g_val * sv + kv * delta_col; + state[i] = sv.x; state[i+1] = sv.y; state[i+2] = sv.z; state[i+3] = sv.w; + + attn_col += dot(sv, vec4(s_q[i], s_q[i+1], s_q[i+2], s_q[i+3])); + } + + data_dst[attn_off + col] = attn_col * scale; + } else { + FLOAT_TYPE kv_col = 0.0; + [[unroll]] for (uint i = 0; i < S_V; i += 4) { + vec4 gv = vec4(s_g[i], s_g[i+1], s_g[i+2], s_g[i+3]); + vec4 sv = vec4(state[i], state[i+1], state[i+2], state[i+3]); + vec4 kv = vec4(s_k[i], s_k[i+1], s_k[i+2], s_k[i+3]); + kv_col += dot(gv * sv, kv); + } + + FLOAT_TYPE delta_col = (v_val - kv_col) * beta_val; + + FLOAT_TYPE attn_col = 0.0; + [[unroll]] for (uint i = 0; i < S_V; i += 4) { + vec4 gv = vec4(s_g[i], s_g[i+1], s_g[i+2], s_g[i+3]); + vec4 sv = vec4(state[i], state[i+1], state[i+2], state[i+3]); + vec4 kv = vec4(s_k[i], s_k[i+1], s_k[i+2], s_k[i+3]); + sv = gv * sv + kv * delta_col; + state[i] = sv.x; state[i+1] = sv.y; state[i+2] = sv.z; state[i+3] = sv.w; + + attn_col += dot(sv, vec4(s_q[i], s_q[i+1], s_q[i+2], s_q[i+3])); + } + + data_dst[attn_off + col] = attn_col * scale; + } + + attn_off += S_V * H; + barrier(); + } + + [[unroll]] for (uint i = 0; i < S_V; i++) { + data_dst[s_off + state_base + i * S_V + col] = state[i]; + } +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp b/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp index 7d0a1de0d..f9af46744 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp @@ -36,7 +36,7 @@ void main() { barrier(); } - const 
FLOAT_TYPE scale = inversesqrt(max(sum[0], FLOAT_TYPE(p.param1))); + const FLOAT_TYPE scale = 1.0f / max(sqrt(sum[0]), FLOAT_TYPE(p.param1)); [[unroll]] for (uint i0 = tid; i0 < p.ne00; i0 += BLOCK_SIZE) { data_d[i3*p.nb13 + i2*p.nb12 + i1*p.nb11 + i0] = D_TYPE(scale * FLOAT_TYPE(data_a[i3*p.nb03 + i2*p.nb02 + i1*p.nb01 + i0])); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/ssm_conv.comp b/ggml/src/ggml-vulkan/vulkan-shaders/ssm_conv.comp index d62696bcf..6802b1fc9 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/ssm_conv.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/ssm_conv.comp @@ -5,8 +5,9 @@ #include "types.glsl" layout(constant_id = 0) const uint BLOCK_SIZE = 32; +layout(constant_id = 1) const uint TOKENS_PER_WG = 16; -layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; +layout(local_size_x_id = 0, local_size_y_id = 1, local_size_z = 1) in; layout(binding = 0) readonly buffer Src0 { float src0[]; }; layout(binding = 1) readonly buffer Src1 { float src1[]; }; @@ -20,25 +21,30 @@ layout(push_constant) uniform PushConstants { }; void main() { - const uint global_thread_id = gl_GlobalInvocationID.x; - const uint i2 = gl_WorkGroupID.y; + const uint i1 = gl_GlobalInvocationID.x; + const uint i2 = gl_WorkGroupID.y * TOKENS_PER_WG + gl_LocalInvocationID.y; const uint i3 = gl_WorkGroupID.z; - if (global_thread_id >= nr || i2 >= n_t || i3 >= n_s) { + if (i1 >= nr || i2 >= n_t || i3 >= n_s) { return; } - const uint i1 = global_thread_id; const uint src0_base = i3 * (nb02 / 4) + i2 + i1 * (nb01 / 4); const uint src1_base = i1 * (nb11 / 4); - const uint dst_idx = i3 * (dst_nb2 / 4) + i2 * (dst_nb1 / 4) + i1; float sum = 0.0; - [[unroll]] for (uint i0 = 0; i0 < nc; i0++) { - const uint src0_idx = src0_base + i0; - const uint src1_idx = src1_base + i0; - sum += src0[src0_idx] * src1[src1_idx]; + + if (nc == 4) { + sum = dot( + vec4(src0[src0_base], src0[src0_base + 1], src0[src0_base + 2], src0[src0_base + 3]), + vec4(src1[src1_base], src1[src1_base + 1], src1[src1_base + 2], src1[src1_base + 3]) + ); + } else { + [[unroll]] for (uint i0 = 0; i0 < nc; i0++) { + sum += src0[src0_base + i0] * src1[src1_base + i0]; + } } + const uint dst_idx = i3 * (dst_nb2 / 4) + i2 * (dst_nb1 / 4) + i1; dst[dst_idx] = sum; } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp index a5da777fa..80db516a9 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp @@ -1004,6 +1004,8 @@ void process_shaders() { string_to_spv("rwkv_wkv7_f32", "wkv7.comp", merge_maps(base_dict, {{"A_TYPE", "float"}})); + string_to_spv("gated_delta_net_f32", "gated_delta_net.comp", merge_maps(base_dict, {{"FLOAT_TYPE", "float"}})); + string_to_spv("opt_step_adamw_f32", "opt_step_adamw.comp", merge_maps(base_dict, {{"A_TYPE", "float"}})); string_to_spv("opt_step_sgd_f32", "opt_step_sgd.comp", merge_maps(base_dict, {{"A_TYPE", "float"}})); diff --git a/ggml/src/ggml-webgpu/wgsl-shaders/repeat.wgsl b/ggml/src/ggml-webgpu/wgsl-shaders/repeat.wgsl new file mode 100644 index 000000000..6e2a1a8b6 --- /dev/null +++ b/ggml/src/ggml-webgpu/wgsl-shaders/repeat.wgsl @@ -0,0 +1,67 @@ +enable f16; + +struct Params { + ne: u32, + + offset_src0: u32, + offset_dst: u32, + + stride_src0_0: u32, + stride_src0_1: u32, + stride_src0_2: u32, + stride_src0_3: u32, + + a_ne0: u32, + a_ne1: u32, + a_ne2: u32, + a_ne3: u32, + + ne0: u32, + ne1: u32, + ne2: u32, +}; + +#ifdef TYPE_F32 
+#define DataType f32
+#endif
+#ifdef TYPE_I32
+#define DataType i32
+#endif
+#ifdef TYPE_I16
+// same size (16-bit) is sufficient for repeat
+#define DataType f16
+#endif
+
+@group(0) @binding(0)
+var<storage, read_write> src0: array<DataType>;
+
+@group(0) @binding(1)
+var<storage, read_write> dst: array<DataType>;
+
+@group(0) @binding(2)
+var<uniform> params: Params;
+
+@compute @workgroup_size(WG_SIZE)
+fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
+    if (gid.x < params.ne) {
+        var i = gid.x;
+        let i3 = i / (params.ne2 * params.ne1 * params.ne0);
+        i = i % (params.ne2 * params.ne1 * params.ne0);
+        let i2 = i / (params.ne1 * params.ne0);
+        i = i % (params.ne1 * params.ne0);
+        let i1 = i / params.ne0;
+        let i0 = i % params.ne0;
+
+        let a_i0 = i0 % params.a_ne0;
+        let a_i1 = i1 % params.a_ne1;
+        let a_i2 = i2 % params.a_ne2;
+        let a_i3 = i3 % params.a_ne3;
+
+        let a_index = a_i0 * params.stride_src0_0 +
+                      a_i1 * params.stride_src0_1 +
+                      a_i2 * params.stride_src0_2 +
+                      a_i3 * params.stride_src0_3;
+
+        dst[params.offset_dst + gid.x] = src0[params.offset_src0 + a_index];
+    }
+}
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 1bea7e8f7..33a7cd459 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -722,6 +722,14 @@ static const struct ggml_type_traits type_traits[GGML_TYPE_COUNT] = {
         .to_float = (ggml_to_float_t) dequantize_row_mxfp4,
         .from_float_ref = (ggml_from_float_t)quantize_row_mxfp4_ref,
     },
+    [GGML_TYPE_NVFP4] = {
+        .type_name = "nvfp4",
+        .blck_size = QK_NVFP4,
+        .type_size = sizeof(block_nvfp4),
+        .is_quantized = true,
+        .to_float = (ggml_to_float_t) dequantize_row_nvfp4,
+        .from_float_ref = (ggml_from_float_t)quantize_row_nvfp4_ref,
+    },
     [GGML_TYPE_Q2_K] = {
         .type_name = "q2_K",
         .blck_size = QK_K,
@@ -1390,6 +1398,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
         case GGML_FTYPE_MOSTLY_Q5_1:  wtype = GGML_TYPE_Q5_1;  break;
         case GGML_FTYPE_MOSTLY_Q8_0:  wtype = GGML_TYPE_Q8_0;  break;
         case GGML_FTYPE_MOSTLY_MXFP4: wtype = GGML_TYPE_MXFP4; break;
+        case GGML_FTYPE_MOSTLY_NVFP4: wtype = GGML_TYPE_NVFP4; break;
        case GGML_FTYPE_MOSTLY_Q2_K:  wtype = GGML_TYPE_Q2_K;  break;
         case GGML_FTYPE_MOSTLY_Q3_K:  wtype = GGML_TYPE_Q3_K;  break;
         case GGML_FTYPE_MOSTLY_Q4_K:  wtype = GGML_TYPE_Q4_K;  break;
@@ -7657,6 +7666,7 @@ size_t ggml_quantize_chunk(
         case GGML_TYPE_Q5_1:  result = quantize_q5_1(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_Q8_0:  result = quantize_q8_0(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_MXFP4: result = quantize_mxfp4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+        case GGML_TYPE_NVFP4: result = quantize_nvfp4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_Q2_K:  result = quantize_q2_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_Q3_K:  result = quantize_q3_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_Q4_K:  result = quantize_q4_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index 32fc9428a..bf617382d 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -3784,6 +3784,7 @@ class GGMLQuantizationType(IntEnum):
     TQ1_0  = 34
     TQ2_0  = 35
     MXFP4  = 39
+    NVFP4  = 40
 
 
 class ExpertGatingFuncType(IntEnum):
@@ -3880,6 +3881,7 @@ class VisionProjectorType:
     GEMMA3   = "gemma3"
     GEMMA3NV = "gemma3nv"
     GEMMA3NA = "gemma3na"
"gemma3na" + PHI4 = "phi4" IDEFICS3 = "idefics3" PIXTRAL = "pixtral" LLAMA4 = "llama4" @@ -3941,6 +3943,7 @@ GGML_QUANT_SIZES: dict[GGMLQuantizationType, tuple[int, int]] = { GGMLQuantizationType.TQ1_0: (256, 2 + 4 * 13), GGMLQuantizationType.TQ2_0: (256, 2 + 64), GGMLQuantizationType.MXFP4: (32, 1 + 16), + GGMLQuantizationType.NVFP4: (64, 4 + 32), } diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index c89a5fdc3..662dda3cf 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -139,10 +139,13 @@ class GGUFWriter: size = prod(shape) if "_exps." in name: - expert_count = shape[-2 if ".bias" in name else -3] - expert_params += (size // expert_count) - expert_sum += expert_count - n_expert_tensors += 1 + if len(shape) >= 3: + expert_count = shape[-2 if ".bias" in name else -3] + expert_params += (size // expert_count) + expert_sum += expert_count + n_expert_tensors += 1 + else: + shared_params += size else: shared_params += size diff --git a/gguf-py/gguf/quants.py b/gguf-py/gguf/quants.py index 31845ea6e..1cd519981 100644 --- a/gguf-py/gguf/quants.py +++ b/gguf-py/gguf/quants.py @@ -704,6 +704,65 @@ class MXFP4(__Quant, qtype=GGMLQuantizationType.MXFP4): return (d * qs.astype(np.float32)) +class NVFP4(__Quant, qtype=GGMLQuantizationType.NVFP4): + # E2M1 values doubled (kvalues_mxfp4 convention) + kvalues = (0, 1, 2, 3, 4, 6, 8, 12, 0, -1, -2, -3, -4, -6, -8, -12) + + @staticmethod + def ue4m3_to_fp32(x: np.ndarray) -> np.ndarray: + """Decode unsigned E4M3 (bias=7) to float, with 0.5 factor for kvalues convention.""" + exp = (x >> 3).astype(np.int32) & 0xF + man = (x & 0x7).astype(np.float32) + raw = np.where( + exp == 0, + man * 2**-9, + (1.0 + man / 8.0) * (2.0 ** (exp.astype(np.float32) - 7))) + return np.where((x == 0) | (x == 0x7F), 0.0, raw * 0.5) + + @staticmethod + def fp32_to_ue4m3(x: np.ndarray) -> np.ndarray: + """Vectorized float32 to unsigned E4M3, matching ggml_fp32_to_ue4m3 in C.""" + x = np.clip(x, 0.0, 448.0).astype(np.float32) + bits = x.view(np.uint32) + fp32_exp = ((bits >> 23) & 0xFF).astype(np.int32) - 127 + fp32_man = ((bits >> 20) & 0x7).astype(np.int32) + ue4m3_exp = fp32_exp + 7 + + # Subnormal + sub_man = np.clip((x * 512.0 + 0.5).astype(np.int32), 0, 7) + sub_result = np.where(sub_man >= 1, sub_man, 0).astype(np.uint8) + + # Normal with rounding + round_bit = ((bits >> 19) & 1).astype(np.int32) + man = fp32_man + round_bit + exp = ue4m3_exp.copy() + overflow = man > 7 + man = np.where(overflow, 0, man) + exp = np.where(overflow, exp + 1, exp) + normal_result = np.where(exp >= 15, np.uint8(0x7E), ((exp << 3) | man).astype(np.uint8)) + + return np.where(x <= 0.0, np.uint8(0), + np.where(ue4m3_exp <= 0, sub_result, + np.where(ue4m3_exp >= 15, np.uint8(0x7E), normal_result))) + + @classmethod + def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray: + n_super = blocks.shape[0] + + d_bytes, qs = np.hsplit(blocks, [4]) + d = cls.ue4m3_to_fp32(d_bytes).reshape(n_super, 4, 1) # (n_super, 4, 1) + + qs = qs.reshape(n_super, 4, 8) + lo = (qs & np.uint8(0x0F)).view(np.int8) + hi = (qs >> np.uint8(4)).view(np.int8) + vals = np.concatenate([lo, hi], axis=-1) # (n_super, 4, 16) + + kvalues = np.array(cls.kvalues, dtype=np.int8).reshape(1, 1, 16) + vals = np.take_along_axis(kvalues, vals, axis=-1) + + return (d * vals.astype(np.float32)).reshape(n_super, 64) + + class IQ2_XXS(__Quant, qtype=GGMLQuantizationType.IQ2_XXS): ksigns: bytes = ( b"\x00\x81\x82\x03\x84\x05\x06\x87\x88\x09\x0a\x8b\x0c\x8d\x8e\x0f" diff --git 
a/gguf-py/gguf/scripts/gguf_convert_endian.py b/gguf-py/gguf/scripts/gguf_convert_endian.py index 86bf87846..164c9171e 100755 --- a/gguf-py/gguf/scripts/gguf_convert_endian.py +++ b/gguf-py/gguf/scripts/gguf_convert_endian.py @@ -65,6 +65,7 @@ byteswap_tensors = { gguf.GGMLQuantizationType.Q4_K: byteswap_q4_k, gguf.GGMLQuantizationType.Q6_K: byteswap_q6_k, gguf.GGMLQuantizationType.MXFP4: byteswap_noop, + gguf.GGMLQuantizationType.NVFP4: byteswap_noop, } diff --git a/gguf-py/tests/test_quants.py b/gguf-py/tests/test_quants.py index 172fa0018..9aa7c4ae2 100755 --- a/gguf-py/tests/test_quants.py +++ b/gguf-py/tests/test_quants.py @@ -68,6 +68,7 @@ class GGMLQuants: "q2_K", "q3_K", "q4_K", "q5_K", "q6_K", "tq1_0", "tq2_0", "mxfp4", + "nvfp4", "iq2_xxs", "iq2_xs", "iq2_s", "iq3_xxs", "iq3_s", "iq1_s", "iq1_m", "iq4_nl", "iq4_xs", ): diff --git a/include/llama.h b/include/llama.h index e4ded0d92..2c8835d0b 100644 --- a/include/llama.h +++ b/include/llama.h @@ -156,6 +156,7 @@ extern "C" { LLAMA_FTYPE_MOSTLY_TQ1_0 = 36, // except 1d tensors LLAMA_FTYPE_MOSTLY_TQ2_0 = 37, // except 1d tensors LLAMA_FTYPE_MOSTLY_MXFP4_MOE = 38, // except 1d tensors + LLAMA_FTYPE_MOSTLY_NVFP4 = 39, // except 1d tensors LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file }; diff --git a/otherarch/sdcpp/stable-diffusion.h b/otherarch/sdcpp/stable-diffusion.h index 8b3545295..062afd3c4 100644 --- a/otherarch/sdcpp/stable-diffusion.h +++ b/otherarch/sdcpp/stable-diffusion.h @@ -120,7 +120,8 @@ enum sd_type_t { // SD_TYPE_IQ4_NL_4_8 = 37, // SD_TYPE_IQ4_NL_8_8 = 38, SD_TYPE_MXFP4 = 39, // MXFP4 (1 block) - SD_TYPE_COUNT = 40, + SD_TYPE_NVFP4 = 40, // NVFP4 (4 blocks, E4M3 scale) + SD_TYPE_COUNT = 41, }; enum sd_log_level_t { diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 0e7c11647..2a54ad7a2 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -7,6 +7,7 @@ #include "llama-memory.h" #include "llama-mmap.h" #include "llama-model.h" +#include "llama-ext.h" #include #include @@ -154,7 +155,8 @@ llama_context::llama_context( cparams.auto_fa = params.flash_attn_type == LLAMA_FLASH_ATTN_TYPE_AUTO; cparams.fused_gdn_ar = true; - cparams.fused_gdn_ch = false; // TODO: implement + cparams.fused_gdn_ch = true; + cparams.auto_fgdn = true; // with causal attention, the batch size is limited by the context size cparams.n_batch = cparams.causal_attn ? 
std::min(cparams.n_ctx, params.n_batch) : params.n_batch; @@ -348,6 +350,14 @@ llama_context::llama_context( if (cparams.pipeline_parallel) { LLAMA_LOG_INFO("%s: pipeline parallelism enabled\n", __func__); + + if (!graph_reuse_disable) { + // TODO: figure out a way to make graph reuse work with pipeline parallelism + // ref: https://github.com/ggml-org/llama.cpp/pull/20463 + LLAMA_LOG_WARN("%s: graph reuse is currently not compatible with pipeline parallelism - disabling\n", __func__); + + graph_reuse_disable = true; + } } sched_reserve(); @@ -471,37 +481,81 @@ void llama_context::sched_reserve() { cparams.auto_fa = false; } - if (cparams.fused_gdn_ar) { - auto * gf = graph_reserve(1, n_seqs, n_outputs, mctx.get(), true); - if (!gf) { - throw std::runtime_error("failed to reserve graph for fused Gated Delta Net check"); - } + if (cparams.auto_fgdn) { + LLAMA_LOG_INFO("%s: resolving fused Gated Delta Net support:\n", __func__); - const size_t prefix_len = strlen(LLAMA_TENSOR_NAME_FGDNAR) + 1; - bool gdn_device_mismatch = false; - for (int i = 0; i < ggml_graph_n_nodes(gf); i++) { - ggml_tensor * n = ggml_graph_node(gf, i); - if (n->op != GGML_OP_GATED_DELTA_NET) { - continue; + if (cparams.fused_gdn_ar) { + auto * gf = graph_reserve(1, n_seqs, n_outputs, mctx.get(), true); + if (!gf) { + throw std::runtime_error("failed to reserve graph for fused Gated Delta Net check (autoregressive)"); } - ggml_backend_dev_t device_gdn = ggml_backend_get_device(ggml_backend_sched_get_tensor_backend(sched.get(), n)); - GGML_ASSERT(strncmp(n->name, LLAMA_TENSOR_NAME_FGDNAR "-", prefix_len) == 0); - const int il = std::stoi(n->name + prefix_len); - ggml_backend_dev_t device_kv = model.dev_layer(il); - if (device_gdn != device_kv) { - LLAMA_LOG_WARN("%s: layer %d is assigned to device %s but the fused Gated Delta Net tensor " - "is assigned to device %s (usually due to missing support)\n", - __func__, il, ggml_backend_dev_name(device_kv), ggml_backend_dev_name(device_gdn)); - gdn_device_mismatch = true; - break; + const size_t prefix_len = strlen(LLAMA_TENSOR_NAME_FGDN_AR) + 1; + bool gdn_device_mismatch = false; + for (int i = 0; i < ggml_graph_n_nodes(gf); i++) { + ggml_tensor * n = ggml_graph_node(gf, i); + if (n->op != GGML_OP_GATED_DELTA_NET) { + continue; + } + ggml_backend_dev_t device_gdn = ggml_backend_get_device(ggml_backend_sched_get_tensor_backend(sched.get(), n)); + + GGML_ASSERT(strncmp(n->name, LLAMA_TENSOR_NAME_FGDN_AR "-", prefix_len) == 0); + const int il = std::stoi(n->name + prefix_len); + ggml_backend_dev_t device_kv = model.dev_layer(il); + if (device_gdn != device_kv) { + LLAMA_LOG_WARN("%s: layer %d is assigned to device %s but the fused Gated Delta Net tensor " + "is assigned to device %s (usually due to missing support)\n", + __func__, il, ggml_backend_dev_name(device_kv), ggml_backend_dev_name(device_gdn)); + gdn_device_mismatch = true; + break; + } + } + + if (gdn_device_mismatch) { + cparams.fused_gdn_ar = false; + LLAMA_LOG_WARN("%s: fused Gated Delta Net (autoregressive) not supported, set to disabled\n", __func__); + } else { + LLAMA_LOG_INFO("%s: fused Gated Delta Net (autoregressive) enabled\n", __func__); } } - if (gdn_device_mismatch) { - cparams.fused_gdn_ar = false; - LLAMA_LOG_WARN("%s: fused Gated Delta Net not supported, set to disabled\n", __func__); + if (cparams.fused_gdn_ch) { + // more than one token in the batch per sequence in order to take the chunked path + auto * gf = graph_reserve(16*n_seqs, n_seqs, n_outputs, mctx.get(), true); + if (!gf) { + throw 
std::runtime_error("failed to reserve graph for fused Gated Delta Net check (chunked)"); + } + + const size_t prefix_len = strlen(LLAMA_TENSOR_NAME_FGDN_CH) + 1; + bool gdn_device_mismatch = false; + for (int i = 0; i < ggml_graph_n_nodes(gf); i++) { + ggml_tensor * n = ggml_graph_node(gf, i); + if (n->op != GGML_OP_GATED_DELTA_NET) { + continue; + } + ggml_backend_dev_t device_gdn = ggml_backend_get_device(ggml_backend_sched_get_tensor_backend(sched.get(), n)); + + GGML_ASSERT(strncmp(n->name, LLAMA_TENSOR_NAME_FGDN_CH "-", prefix_len) == 0); + const int il = std::stoi(n->name + prefix_len); + ggml_backend_dev_t device_kv = model.dev_layer(il); + if (device_gdn != device_kv) { + LLAMA_LOG_WARN("%s: layer %d is assigned to device %s but the fused Gated Delta Net tensor " + "is assigned to device %s (usually due to missing support)\n", + __func__, il, ggml_backend_dev_name(device_kv), ggml_backend_dev_name(device_gdn)); + gdn_device_mismatch = true; + break; + } + } + + if (gdn_device_mismatch) { + cparams.fused_gdn_ch = false; + LLAMA_LOG_WARN("%s: fused Gated Delta Net (chunked) not supported, set to disabled\n", __func__); + } else { + LLAMA_LOG_INFO("%s: fused Gated Delta Net (chunked) enabled\n", __func__); + } } + + cparams.auto_fgdn = false; } // reserve worst-case graph @@ -3094,6 +3148,19 @@ uint32_t llama_get_sampled_probs_count_ith(llama_context * ctx, int32_t i) { return static_cast(ctx->get_sampled_probs_count(i)); } +struct ggml_cgraph * llama_graph_reserve( + struct llama_context * ctx, + uint32_t n_tokens, + uint32_t n_seqs, + uint32_t n_outputs) { + auto * memory = ctx->get_memory(); + llama_memory_context_ptr mctx; + if (memory) { + mctx = memory->init_full(); + } + return ctx->graph_reserve(n_tokens, n_seqs, n_outputs, mctx.get()); +} + // llama adapter API int32_t llama_set_adapters_lora( diff --git a/src/llama-cparams.h b/src/llama-cparams.h index 333922468..9d3594741 100644 --- a/src/llama-cparams.h +++ b/src/llama-cparams.h @@ -33,6 +33,7 @@ struct llama_cparams { bool auto_fa; bool fused_gdn_ar; // use fused gated delta net (autoregressive) bool fused_gdn_ch; // use fused gated delta net (chunked) + bool auto_fgdn; bool no_perf; bool warmup; bool op_offload; diff --git a/src/llama-ext.h b/src/llama-ext.h new file mode 100644 index 000000000..13ced783b --- /dev/null +++ b/src/llama-ext.h @@ -0,0 +1,12 @@ +#pragma once + +#include "llama-context.h" +#include "ggml.h" +#include "stdint.h" + +// Reserve a new compute graph. It is valid until the next call to llama_graph_reserve. +LLAMA_API struct ggml_cgraph * llama_graph_reserve( + struct llama_context * ctx, + uint32_t n_tokens, + uint32_t n_seqs, + uint32_t n_outputs); diff --git a/src/llama-grammar.cpp b/src/llama-grammar.cpp index 25fbf6c76..79f26a372 100644 --- a/src/llama-grammar.cpp +++ b/src/llama-grammar.cpp @@ -1220,13 +1220,13 @@ struct llama_grammar * llama_grammar_init_impl( // if there is a grammar, parse it // rules will be empty (default) if there are parse errors if (!parser.parse(grammar_str) || parser.rules.empty()) { - fprintf(stderr, "%s: failed to parse grammar\n", __func__); + LLAMA_LOG_ERROR("failed to parse grammar\n"); return nullptr; } - // Ensure that there is a "root" node. 
- if (parser.symbol_ids.find("root") == parser.symbol_ids.end()) { - fprintf(stderr, "%s: grammar does not contain a 'root' symbol\n", __func__); + // Ensure that the grammar contains the start symbol + if (parser.symbol_ids.find(grammar_root) == parser.symbol_ids.end()) { + LLAMA_LOG_ERROR("grammar does not contain a '%s' symbol\n", grammar_root); return nullptr; } @@ -1255,7 +1255,7 @@ struct llama_grammar * llama_grammar_init_impl( continue; } if (llama_grammar_detect_left_recursion(vec_rules, i, &rules_visited, &rules_in_progress, &rules_may_be_empty)) { - LLAMA_LOG_ERROR("unsupported grammar, left recursion detected for nonterminal at index %zu", i); + LLAMA_LOG_ERROR("unsupported grammar, left recursion detected for nonterminal at index %zu\n", i); return nullptr; } } diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 528f8e545..9a215bb77 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -900,7 +900,8 @@ ggml_tensor * llm_graph_context::build_cvec( ggml_tensor * llm_graph_context::build_lora_mm( ggml_tensor * w, - ggml_tensor * cur) const { + ggml_tensor * cur, + ggml_tensor * w_s) const { ggml_tensor * res = ggml_mul_mat(ctx0, w, cur); for (const auto & lora : *loras) { @@ -921,6 +922,10 @@ ggml_tensor * llm_graph_context::build_lora_mm( res = ggml_add(ctx0, res, ab_cur); } + if (w_s) { + res = ggml_mul(ctx0, res, w_s); + } + return res; } @@ -1166,7 +1171,10 @@ ggml_tensor * llm_graph_context::build_moe_ffn( llama_expert_gating_func_type gating_op, int il, ggml_tensor * probs_in, - ggml_tensor * gate_up_exps) const { + ggml_tensor * gate_up_exps, + ggml_tensor * up_exps_s, + ggml_tensor * gate_exps_s, + ggml_tensor * down_exps_s) const { return build_moe_ffn( cur, gate_inp, /* gate_inp_b */ nullptr, @@ -1182,7 +1190,11 @@ ggml_tensor * llm_graph_context::build_moe_ffn( gating_op, il, probs_in, - gate_up_exps + gate_up_exps, + /* gate_up_exps_b */ nullptr, + up_exps_s, + gate_exps_s, + down_exps_s ); } @@ -1206,7 +1218,10 @@ ggml_tensor * llm_graph_context::build_moe_ffn( int il, ggml_tensor * probs_in, ggml_tensor * gate_up_exps, - ggml_tensor * gate_up_exps_b) const { + ggml_tensor * gate_up_exps_b, + ggml_tensor * up_exps_s, + ggml_tensor * gate_exps_s, + ggml_tensor * down_exps_s) const { const int64_t n_embd = cur->ne[0]; const int64_t n_tokens = cur->ne[1]; const bool weight_before_ffn = arch == LLM_ARCH_LLAMA4; // for llama4, we apply the sigmoid-ed weights before the FFN @@ -1358,6 +1373,15 @@ ggml_tensor * llm_graph_context::build_moe_ffn( cb(gate_up, "ffn_moe_gate_up_biased", il); } + // apply per-expert scale2 to merged gate_up (use up_exps_s since gate and up are fused) + if (up_exps_s) { + ggml_tensor * s = ggml_reshape_3d(ctx0, up_exps_s, 1, n_expert, 1); + s = ggml_repeat_4d(ctx0, s, 1, n_expert, n_tokens, 1); + s = ggml_get_rows(ctx0, s, selected_experts); // [1, n_expert_used, n_tokens] + gate_up = ggml_mul(ctx0, gate_up, s); + cb(gate_up, "ffn_moe_gate_up_scaled", il); + } + const int64_t n_ff = gate_up->ne[0] / 2; cur = ggml_view_3d(ctx0, gate_up, n_ff, gate_up->ne[1], gate_up->ne[2], gate_up->nb[1], gate_up->nb[2], 0); cb(cur, "ffn_moe_gate", il); @@ -1373,6 +1397,15 @@ ggml_tensor * llm_graph_context::build_moe_ffn( cb(up, "ffn_moe_up_biased", il); } + // apply per-expert scale2 to up + if (up_exps_s) { + ggml_tensor * s = ggml_reshape_3d(ctx0, up_exps_s, 1, n_expert, 1); + s = ggml_repeat_4d(ctx0, s, 1, n_expert, n_tokens, 1); + s = ggml_get_rows(ctx0, s, selected_experts); // [1, n_expert_used, n_tokens] + up = ggml_mul(ctx0, up, s); + 
cb(up, "ffn_moe_up_scaled", il); + } + if (gate_exps) { cur = build_lora_mm_id(gate_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens] cb(cur, "ffn_moe_gate", il); @@ -1384,6 +1417,15 @@ ggml_tensor * llm_graph_context::build_moe_ffn( cur = ggml_add_id(ctx0, cur, gate_exps_b, selected_experts); cb(cur, "ffn_moe_gate_biased", il); } + + // apply per-expert scale2 to gate + if (gate_exps_s) { + ggml_tensor * s = ggml_reshape_3d(ctx0, gate_exps_s, 1, n_expert, 1); + s = ggml_repeat_4d(ctx0, s, 1, n_expert, n_tokens, 1); + s = ggml_get_rows(ctx0, s, selected_experts); // [1, n_expert_used, n_tokens] + cur = ggml_mul(ctx0, cur, s); + cb(cur, "ffn_moe_gate_scaled", il); + } } const bool has_gate = gate_exps || gate_up_exps; @@ -1463,6 +1505,15 @@ ggml_tensor * llm_graph_context::build_moe_ffn( cb(experts, "ffn_moe_down_biased", il); } + // apply per-expert scale2 to down + if (down_exps_s) { + ggml_tensor * s = ggml_reshape_3d(ctx0, down_exps_s, 1, n_expert, 1); + s = ggml_repeat_4d(ctx0, s, 1, n_expert, n_tokens, 1); + s = ggml_get_rows(ctx0, s, selected_experts); // [1, n_expert_used, n_tokens] + experts = ggml_mul(ctx0, experts, s); + cb(experts, "ffn_moe_down_scaled", il); + } + if (!weight_before_ffn) { experts = ggml_mul(ctx0, experts, weights); cb(cur, "ffn_moe_weighted", il); diff --git a/src/llama-graph.h b/src/llama-graph.h index 7f6c9e963..4855685ef 100644 --- a/src/llama-graph.h +++ b/src/llama-graph.h @@ -764,10 +764,11 @@ struct llm_graph_context { ggml_tensor * cur, int il) const; - // do mat_mul, while optionally apply lora + // do mat_mul, while optionally apply lora and per-tensor scale ggml_tensor * build_lora_mm( ggml_tensor * w, - ggml_tensor * cur) const; + ggml_tensor * cur, + ggml_tensor * w_s = nullptr) const; // do mat_mul_id, while optionally apply lora ggml_tensor * build_lora_mm_id( @@ -814,7 +815,10 @@ struct llm_graph_context { llama_expert_gating_func_type gating_op, int il, ggml_tensor * probs_in = nullptr, - ggml_tensor * gate_up_exps = nullptr) const; + ggml_tensor * gate_up_exps = nullptr, + ggml_tensor * up_exps_s = nullptr, + ggml_tensor * gate_exps_s = nullptr, + ggml_tensor * down_exps_s = nullptr) const; ggml_tensor * build_moe_ffn( ggml_tensor * cur, @@ -836,7 +840,10 @@ struct llm_graph_context { int il, ggml_tensor * probs_in = nullptr, ggml_tensor * gate_up_exps = nullptr, - ggml_tensor * gate_up_exps_b = nullptr) const; + ggml_tensor * gate_up_exps_b = nullptr, + ggml_tensor * up_exps_s = nullptr, + ggml_tensor * gate_exps_s = nullptr, + ggml_tensor * down_exps_s = nullptr) const; // // inputs diff --git a/src/llama-impl.h b/src/llama-impl.h index ee27ac1be..e4f35c8e5 100644 --- a/src/llama-impl.h +++ b/src/llama-impl.h @@ -70,6 +70,6 @@ std::string llama_format_tensor_shape(const struct ggml_tensor * t); std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i); -#define LLAMA_TENSOR_NAME_FATTN "__fattn__" -#define LLAMA_TENSOR_NAME_FGDNAR "__fgdnar__" -#define LLAMA_TENSOR_NAME_FGDNCH "__fgdnch__" +#define LLAMA_TENSOR_NAME_FATTN "__fattn__" +#define LLAMA_TENSOR_NAME_FGDN_AR "__fgdn_ar__" +#define LLAMA_TENSOR_NAME_FGDN_CH "__fgdn_ch__" diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp index 8261df7d3..5a6348893 100644 --- a/src/llama-model-loader.cpp +++ b/src/llama-model-loader.cpp @@ -42,6 +42,7 @@ static std::string llama_model_ftype_name(llama_ftype ftype) { case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1"; case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0"; case LLAMA_FTYPE_MOSTLY_MXFP4_MOE: return "MXFP4 
MoE"; + case LLAMA_FTYPE_MOSTLY_NVFP4: return "NVFP4"; case LLAMA_FTYPE_MOSTLY_Q2_K: return "Q2_K - Medium"; case LLAMA_FTYPE_MOSTLY_Q2_K_S: return "Q2_K - Small"; case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "Q3_K - Small"; @@ -725,6 +726,7 @@ llama_model_loader::llama_model_loader( case GGML_TYPE_IQ4_NL: ftype = LLAMA_FTYPE_MOSTLY_IQ4_NL; break; case GGML_TYPE_IQ4_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ4_XS; break; case GGML_TYPE_IQ3_S: ftype = LLAMA_FTYPE_MOSTLY_IQ3_S; break; + case GGML_TYPE_NVFP4: ftype = LLAMA_FTYPE_MOSTLY_NVFP4; break; default: { LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max)); diff --git a/src/llama-model.cpp b/src/llama-model.cpp index acfbfe944..70dcc356e 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -5168,23 +5168,23 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.attn_sub_norm = create_tensor(tn(LLM_TENSOR_ATTN_SUB_NORM, "weight", i), {n_embd}, 0); layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); - layer.wq_scale = create_tensor(tn(LLM_TENSOR_ATTN_Q, "scale", i), {1}, TENSOR_NOT_REQUIRED); + layer.wq_s = create_tensor(tn(LLM_TENSOR_ATTN_Q, "scale", i), {1}, TENSOR_NOT_REQUIRED); layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); - layer.wk_scale = create_tensor(tn(LLM_TENSOR_ATTN_K, "scale", i), {1}, TENSOR_NOT_REQUIRED); + layer.wk_s = create_tensor(tn(LLM_TENSOR_ATTN_K, "scale", i), {1}, TENSOR_NOT_REQUIRED); layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); - layer.wv_scale = create_tensor(tn(LLM_TENSOR_ATTN_V, "scale", i), {1}, TENSOR_NOT_REQUIRED); + layer.wv_s = create_tensor(tn(LLM_TENSOR_ATTN_V, "scale", i), {1}, TENSOR_NOT_REQUIRED); layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); - layer.wo_scale = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "scale", i), {1}, TENSOR_NOT_REQUIRED); + layer.wo_s = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "scale", i), {1}, TENSOR_NOT_REQUIRED); layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); layer.ffn_sub_norm = create_tensor(tn(LLM_TENSOR_FFN_SUB_NORM, "weight", i), {n_ff}, 0); layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); - layer.ffn_gate_scale = create_tensor(tn(LLM_TENSOR_FFN_GATE, "scale", i), {1}, TENSOR_NOT_REQUIRED); + layer.ffn_gate_s = create_tensor(tn(LLM_TENSOR_FFN_GATE, "scale", i), {1}, TENSOR_NOT_REQUIRED); layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0); - layer.ffn_down_scale = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "scale", i), {1}, TENSOR_NOT_REQUIRED); + layer.ffn_down_s = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "scale", i), {1}, TENSOR_NOT_REQUIRED); layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); - layer.ffn_up_scale = create_tensor(tn(LLM_TENSOR_FFN_UP, "scale", i), {1}, TENSOR_NOT_REQUIRED); + layer.ffn_up_s = create_tensor(tn(LLM_TENSOR_FFN_UP, "scale", i), {1}, TENSOR_NOT_REQUIRED); } } break; case LLM_ARCH_T5: @@ -7601,6 +7601,48 @@ bool llama_model::load_tensors(llama_model_loader & ml) { default: throw std::runtime_error("unknown architecture"); } + + // generic pass: load optional per-tensor/per-expert ".scale" tensors (e.g. 
NVFP4 scale2)
+    // this avoids having to add scale loading to every architecture
+    for (int i = 0; i < n_layer; ++i) {
+        auto & layer = layers[i];
+
+        // attention weight scales (per-tensor, shape {1})
+        if (!layer.wq_s && layer.wq) {
+            layer.wq_s = create_tensor(tn(LLM_TENSOR_ATTN_Q, "scale", i), {1}, TENSOR_NOT_REQUIRED);
+        }
+        if (!layer.wk_s && layer.wk) {
+            layer.wk_s = create_tensor(tn(LLM_TENSOR_ATTN_K, "scale", i), {1}, TENSOR_NOT_REQUIRED);
+        }
+        if (!layer.wv_s && layer.wv) {
+            layer.wv_s = create_tensor(tn(LLM_TENSOR_ATTN_V, "scale", i), {1}, TENSOR_NOT_REQUIRED);
+        }
+        if (!layer.wo_s && layer.wo) {
+            layer.wo_s = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "scale", i), {1}, TENSOR_NOT_REQUIRED);
+        }
+
+        // dense FFN weight scales (per-tensor, shape {1})
+        if (!layer.ffn_gate_s && layer.ffn_gate) {
+            layer.ffn_gate_s = create_tensor(tn(LLM_TENSOR_FFN_GATE, "scale", i), {1}, TENSOR_NOT_REQUIRED);
+        }
+        if (!layer.ffn_down_s && layer.ffn_down) {
+            layer.ffn_down_s = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "scale", i), {1}, TENSOR_NOT_REQUIRED);
+        }
+        if (!layer.ffn_up_s && layer.ffn_up) {
+            layer.ffn_up_s = create_tensor(tn(LLM_TENSOR_FFN_UP, "scale", i), {1}, TENSOR_NOT_REQUIRED);
+        }
+
+        // MoE expert weight scales (per-expert, shape {n_expert})
+        if (!layer.ffn_gate_exps_s && layer.ffn_gate_exps) {
+            layer.ffn_gate_exps_s = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "scale", i), {n_expert}, TENSOR_NOT_REQUIRED);
+        }
+        if (!layer.ffn_down_exps_s && layer.ffn_down_exps) {
+            layer.ffn_down_exps_s = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "scale", i), {n_expert}, TENSOR_NOT_REQUIRED);
+        }
+        if (!layer.ffn_up_exps_s && layer.ffn_up_exps) {
+            layer.ffn_up_exps_s = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "scale", i), {n_expert}, TENSOR_NOT_REQUIRED);
+        }
+    }
 }
 
 ml.done_getting_tensors();
diff --git a/src/llama-model.h b/src/llama-model.h
index 74c79a774..9a2dacecc 100644
--- a/src/llama-model.h
+++ b/src/llama-model.h
@@ -295,6 +295,11 @@ struct llama_layer {
     struct ggml_tensor * ffn_up_exps_b      = nullptr;
     struct ggml_tensor * ffn_gate_up_exps_b = nullptr;
 
+    // ff MoE per-expert scales (NVFP4 per-tensor scale2)
+    struct ggml_tensor * ffn_gate_exps_s = nullptr;
+    struct ggml_tensor * ffn_down_exps_s = nullptr;
+    struct ggml_tensor * ffn_up_exps_s   = nullptr;
+
     // ff MoE latent proj
     struct ggml_tensor * ffn_latent_down = nullptr;
     struct ggml_tensor * ffn_latent_up   = nullptr;
@@ -392,13 +397,13 @@ struct llama_layer {
     struct ggml_tensor * rope_freqs = nullptr;
 
     // bitnet scale
-    struct ggml_tensor * wq_scale       = nullptr;
-    struct ggml_tensor * wk_scale       = nullptr;
-    struct ggml_tensor * wv_scale       = nullptr;
-    struct ggml_tensor * wo_scale       = nullptr;
-    struct ggml_tensor * ffn_gate_scale = nullptr;
-    struct ggml_tensor * ffn_up_scale   = nullptr;
-    struct ggml_tensor * ffn_down_scale = nullptr;
+    struct ggml_tensor * wq_s       = nullptr;
+    struct ggml_tensor * wk_s       = nullptr;
+    struct ggml_tensor * wv_s       = nullptr;
+    struct ggml_tensor * wo_s       = nullptr;
+    struct ggml_tensor * ffn_gate_s = nullptr;
+    struct ggml_tensor * ffn_up_s   = nullptr;
+    struct ggml_tensor * ffn_down_s = nullptr;
 
     // altup & laurel
     struct ggml_tensor * per_layer_inp_gate = nullptr;
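
These optional per-expert ".scale" tensors feed the new up/gate/down_exps_s arguments of build_moe_ffn (see llama-graph.cpp above), where a reshape to [1, n_expert, 1], a repeat across tokens, and a ggml_get_rows on selected_experts broadcast one scalar per routed expert. In numpy that whole reshape/repeat/get_rows chain collapses to a fancy-index; the sketch below is illustrative only, with made-up shapes and names:

import numpy as np

n_ff, n_expert, n_expert_used, n_tokens = 8, 4, 2, 3
rng = np.random.default_rng(0)

up = rng.standard_normal((n_ff, n_expert_used, n_tokens)).astype(np.float32)
up_exps_s = rng.standard_normal(n_expert).astype(np.float32)          # one scale per expert
selected = rng.integers(0, n_expert, size=(n_expert_used, n_tokens))  # ggml_get_rows ids

# reshape_3d + repeat_4d + get_rows collapse to a fancy-index here:
s = up_exps_s[selected]          # [n_expert_used, n_tokens]
scaled = up * s[None, :, :]      # broadcast over the n_ff dimension

assert scaled.shape == up.shape

diff --git a/src/models/bitnet.cpp b/src/models/bitnet.cpp
index d47638498..ccf5bc8e8 100644
--- a/src/models/bitnet.cpp
+++ b/src/models/bitnet.cpp
@@ -29,10 +29,7 @@ llm_build_bitnet::llm_build_bitnet(const llama_model & model, const llm_graph_pa
        // self-attention
        {
            // compute Q and K and RoPE them
-           ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);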
-           if (model.layers[il].wq_scale) {
-               Qcur = ggml_mul(ctx0, Qcur, model.layers[il].wq_scale);
-           }
+           ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur, model.layers[il].wq_s);
            cb(Qcur, "Qcur", il);
            if (model.layers[il].bq) {
                Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
@@ -40,10 +37,7 @@ llm_build_bitnet::llm_build_bitnet(const llama_model & model, const llm_graph_pa
            }
 
            // B1.K
-           ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
-           if (model.layers[il].wk_scale) {
-               Kcur = ggml_mul(ctx0, Kcur, model.layers[il].wk_scale);
-           }
+           ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur, model.layers[il].wk_s);
            cb(Kcur, "Kcur", il);
            if (model.layers[il].bk) {
                Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
@@ -51,10 +45,7 @@ llm_build_bitnet::llm_build_bitnet(const llama_model & model, const llm_graph_pa
            }
 
            // B1.V
-           ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
-           if (model.layers[il].wv_scale) {
-               Vcur = ggml_mul(ctx0, Vcur, model.layers[il].wv_scale);
-           }
+           ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur, model.layers[il].wv_s);
            cb(Vcur, "Vcur", il);
            if (model.layers[il].bv) {
                Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
@@ -90,10 +81,7 @@ llm_build_bitnet::llm_build_bitnet(const llama_model & model, const llm_graph_pa
                LLM_NORM_RMS, il);
            cb(cur, "attn_sub_norm", il);
 
-           cur = build_lora_mm(model.layers[il].wo, cur);
-           if (model.layers[il].wo_scale) {
-               cur = ggml_mul(ctx0, cur, model.layers[il].wo_scale);
-           }
+           cur = build_lora_mm(model.layers[il].wo, cur, model.layers[il].wo_s);
            if (model.layers[il].bo) {
                cur = ggml_add(ctx0, cur, model.layers[il].bo);
            }
@@ -115,8 +103,8 @@ llm_build_bitnet::llm_build_bitnet(const llama_model & model, const llm_graph_pa
        cb(cur, "ffn_norm", il);
 
        cur = build_ffn(cur,
-               model.layers[il].ffn_up,   NULL, model.layers[il].ffn_up_scale,
-               model.layers[il].ffn_gate, NULL, model.layers[il].ffn_gate_scale,
+               model.layers[il].ffn_up,   NULL, model.layers[il].ffn_up_s,
+               model.layers[il].ffn_gate, NULL, model.layers[il].ffn_gate_s,
                NULL,                      NULL, NULL,
                NULL,
                LLM_FFN_SILU, LLM_FFN_PAR, il);
@@ -127,10 +115,7 @@ llm_build_bitnet::llm_build_bitnet(const llama_model & model, const llm_graph_pa
            LLM_NORM_RMS, il);
        cb(cur, "ffn_sub_norm", il);
 
-       cur = build_lora_mm(model.layers[il].ffn_down, cur);
-       if (model.layers[il].ffn_down_scale) {
-           cur = ggml_mul(ctx0, cur, model.layers[il].ffn_down_scale);
-       }
+       cur = build_lora_mm(model.layers[il].ffn_down, cur, model.layers[il].ffn_down_s);
        cb(cur, "ffn_down", il);
 
        cur = ggml_add(ctx0, cur, ffn_inp);
diff --git a/src/models/delta-net-base.cpp b/src/models/delta-net-base.cpp
index b0be62fc6..a62dbc15d 100644
--- a/src/models/delta-net-base.cpp
+++ b/src/models/delta-net-base.cpp
@@ -41,13 +41,6 @@ std::pair<ggml_tensor *, ggml_tensor *> llm_build_delta_net_base::build_delta_ne
    GGML_ASSERT(b->ne[0] == 1   && b->ne[1] == H_v && b->ne[2] == n_tokens && b->ne[3] == n_seqs);
    GGML_ASSERT(s->ne[0] == S_v && s->ne[1] == S_v && s->ne[2] == H_v && s->ne[3] == n_seqs);
 
-    if (cparams.fused_gdn_ch) {
-        //ggml_tensor * result = ggml_gated_delta_net(ctx0, q, k, v, g, b, s);
-        //cb(result, LLAMA_TENSOR_NAME_FGDNCH, il);
-
-        GGML_ABORT("not implemented yet");
-    }
-
    const float scale = 1.0f / sqrtf(S_k);
 
    q = ggml_scale(ctx0, q, scale);
@@ -325,26 +318,6 @@ std::pair<ggml_tensor *, ggml_tensor *> llm_build_delta_net_base::build_delta_ne
    GGML_ASSERT(b->ne[0] == 1   && b->ne[1] == H_v && b->ne[2] == n_tokens && b->ne[3] == n_seqs);
    GGML_ASSERT(s->ne[0] == S_v && s->ne[1] == S_v && s->ne[2] == H_v && s->ne[3] == n_seqs);
 
-    if (cparams.fused_gdn_ar) {
-        ggml_tensor * result = ggml_gated_delta_net(ctx0, q, k, v, g, b, s);
-        cb(result, LLAMA_TENSOR_NAME_FGDNAR, il);
-
-        ggml_tensor * output = ggml_view_4d(ctx0, result,
-                S_v, H_v, n_tokens, n_seqs,
-                ggml_row_size(result->type, S_v),
-                ggml_row_size(result->type, S_v * H_v),
-                ggml_row_size(result->type, S_v * H_v * n_tokens), 0);
-
-        ggml_tensor * new_state = ggml_view_4d(ctx0, result,
-                S_v, S_v, H_v, n_seqs,
-                ggml_row_size(result->type, S_v),
-                ggml_row_size(result->type, S_v * S_v),
-                ggml_row_size(result->type, S_v * S_v * H_v),
-                ggml_row_size(result->type, S_v * H_v * n_tokens * n_seqs));
-
-        return {output, new_state};
-    }
-
    const float scale = 1.0f / sqrtf(S_k);
 
    q = ggml_scale(ctx0, q, scale);
@@ -401,3 +374,78 @@ std::pair<ggml_tensor *, ggml_tensor *> llm_build_delta_net_base::build_delta_ne
 
    return {o, s};
 }
+
+std::pair<ggml_tensor *, ggml_tensor *> llm_build_delta_net_base::build_delta_net_fused(
+        ggml_tensor * q,
+        ggml_tensor * k,
+        ggml_tensor * v,
+        ggml_tensor * g,
+        ggml_tensor * b,
+        ggml_tensor * s,
+        int il) {
+    const int64_t S_k      = q->ne[0];
+    const int64_t H_k      = q->ne[1];
+    const int64_t n_tokens = q->ne[2];
+    const int64_t n_seqs   = q->ne[3];
+
+    const int64_t S_v = v->ne[0];
+    const int64_t H_v = v->ne[1];
+
+    GGML_ASSERT(S_k == S_v);
+    GGML_ASSERT(H_v % H_k == 0);
+
+    GGML_ASSERT(q->ne[0] == S_k && q->ne[1] == H_k && q->ne[2] == n_tokens && q->ne[3] == n_seqs);
+    GGML_ASSERT(k->ne[0] == S_k && k->ne[1] == H_k && k->ne[2] == n_tokens && k->ne[3] == n_seqs);
+    GGML_ASSERT(v->ne[0] == S_v && v->ne[1] == H_v && v->ne[2] == n_tokens && v->ne[3] == n_seqs);
+
+    GGML_ASSERT(g->ne[0] == 1 || g->ne[0] == S_v);
+    GGML_ASSERT(                 g->ne[1] == H_v && g->ne[2] == n_tokens && g->ne[3] == n_seqs);
+    GGML_ASSERT(b->ne[0] == 1 && b->ne[1] == H_v && b->ne[2] == n_tokens && b->ne[3] == n_seqs);
+    GGML_ASSERT(s->ne[0] == S_v && s->ne[1] == S_v && s->ne[2] == H_v && s->ne[3] == n_seqs);
+
+    ggml_tensor * result = ggml_gated_delta_net(ctx0, q, k, v, g, b, s);
+    if (n_tokens == 1) {
+        cb(result, LLAMA_TENSOR_NAME_FGDN_AR, il);
+    } else {
+        cb(result, LLAMA_TENSOR_NAME_FGDN_CH, il);
+    }
+
+    ggml_tensor * output = ggml_view_4d(ctx0, result,
+            S_v, H_v, n_tokens, n_seqs,
+            ggml_row_size(result->type, S_v),
+            ggml_row_size(result->type, S_v * H_v),
+            ggml_row_size(result->type, S_v * H_v * n_tokens), 0);
+
+    ggml_tensor * new_state = ggml_view_4d(ctx0, result,
+            S_v, S_v, H_v, n_seqs,
+            ggml_row_size(result->type, S_v),
+            ggml_row_size(result->type, S_v * S_v),
+            ggml_row_size(result->type, S_v * S_v * H_v),
+            ggml_row_size(result->type, S_v * H_v * n_tokens * n_seqs));
+
+    return {output, new_state};
+}
+
+std::pair<ggml_tensor *, ggml_tensor *> llm_build_delta_net_base::build_delta_net(
+        ggml_tensor * q,
+        ggml_tensor * k,
+        ggml_tensor * v,
+        ggml_tensor * g,
+        ggml_tensor * b,
+        ggml_tensor * s,
+        int il) {
+    const int64_t n_seq_tokens = q->ne[2];
+
+    if (n_seq_tokens == 1) {
+        if (cparams.fused_gdn_ar) {
+            return build_delta_net_fused(q, k, v, g, b, s, il);
+        }
+        return build_delta_net_autoregressive(q, k, v, g, b, s, il);
+    }
+
+    if (cparams.fused_gdn_ch) {
+        return build_delta_net_fused(q, k, v, g, b, s, il);
+    }
+
+    return build_delta_net_chunking(q, k, v, g, b, s, il);
+}
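
For orientation, the fused operator dispatched here computes, per head and per token, the same recurrence as gated_delta_net.comp earlier in this patch (non-KDA path): kv = S^T k, delta = beta * (v - g * kv), S <- g * S + outer(k, delta), o = scale * S^T q. The numpy sketch below is an illustrative reference only; gated_delta_net_ref and its shapes are assumptions, not the ggml implementation:

import numpy as np

def gated_delta_net_ref(q, k, v, g, beta, S, scale):
    """q, k, v: [n_tokens, S_v]; g, beta: [n_tokens]; S: [S_v, S_v] running state."""
    outs = []
    for t in range(q.shape[0]):
        kv    = S.T @ k[t]                         # read old state along columns
        delta = (v[t] - g[t] * kv) * beta[t]       # delta-rule correction
        S     = g[t] * S + np.outer(k[t], delta)   # decay + rank-1 write
        outs.append(scale * (S.T @ q[t]))          # attention output for token t
    return np.stack(outs), S

rng = np.random.default_rng(0)
S_v, n_tokens = 8, 4
q, k, v = (rng.standard_normal((n_tokens, S_v)).astype(np.float32) for _ in range(3))
g    = np.exp(rng.standard_normal(n_tokens).astype(np.float32))   # per-token decay, exp() as in the shader
beta = rng.standard_normal(n_tokens).astype(np.float32)
o, S_new = gated_delta_net_ref(q, k, v, g, beta, np.zeros((S_v, S_v), np.float32), S_v ** -0.5)

The build_delta_net dispatcher above then simply routes single-token batches to the autoregressive path and larger batches to the chunked path, using the fused op only when the corresponding cparams flag survived the device checks in llama-context.cpp.

diff --git a/src/models/kimi-linear.cpp b/src/models/kimi-linear.cpp
index 063b17a2f..4d62f4e71 100644
--- a/src/models/kimi-linear.cpp
+++ b/src/models/kimi-linear.cpp
@@ -169,9 +169,7 @@ llm_build_kimi_linear::llm_build_kimi_linear(const llama_model & model, const ll
        Kcur = ggml_l2_norm(ctx0, Kcur, eps_norm);
 
        // Choose between build_delta_net_chunking and build_delta_net_recurrent based on n_tokens
-       std::pair<ggml_tensor *, ggml_tensor *> attn_out = n_seq_tokens == 1 ?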
-           build_delta_net_autoregressive(Qcur, Kcur, Vcur, g1, beta, state, il) :
-           build_delta_net_chunking(Qcur, Kcur, Vcur, g1, beta, state, il);
+       auto attn_out = build_delta_net(Qcur, Kcur, Vcur, g1, beta, state, il);
 
        ggml_tensor * output    = ggml_cont(ctx0, attn_out.first);
        ggml_tensor * new_state = attn_out.second;
diff --git a/src/models/llama.cpp b/src/models/llama.cpp
index ca4beac51..e08ae0c0b 100644
--- a/src/models/llama.cpp
+++ b/src/models/llama.cpp
@@ -43,19 +43,19 @@ llm_build_llama::llm_build_llama(const llama_model & model, const llm_gra
        ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
 
        // compute Q and K and RoPE them
-       ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
+       ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur, model.layers[il].wq_s);
        cb(Qcur, "Qcur", il);
        if (model.layers[il].bq) {
            Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
            cb(Qcur, "Qcur", il);
        }
 
-       ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
+       ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur, model.layers[il].wk_s);
        cb(Kcur, "Kcur", il);
        if (model.layers[il].bk) {
            Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
            cb(Kcur, "Kcur", il);
        }
 
-       ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
+       ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur, model.layers[il].wv_s);
        cb(Vcur, "Vcur", il);
        if (model.layers[il].bv) {
            Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
@@ -91,6 +91,9 @@ llm_build_llama::llm_build_llama(const llama_model & model, const llm_gra
            cur = build_attn(inp_attn,
                    model.layers[il].wo, model.layers[il].bo,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
+           if (model.layers[il].wo_s) {
+               cur = ggml_mul(ctx0, cur, model.layers[il].wo_s);
+           }
            cb(cur, "attn_out", il);
        }
 
        if (il == n_layer - 1 && inp_out_ids) {
@@ -109,9 +112,9 @@ llm_build_llama::llm_build_llama(const llama_model & model, const llm_gra
            cb(cur, "ffn_norm", il);
 
            cur = build_ffn(cur,
-                   model.layers[il].ffn_up,   model.layers[il].ffn_up_b,   NULL,
-                   model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
-                   model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
+                   model.layers[il].ffn_up,   model.layers[il].ffn_up_b,   model.layers[il].ffn_up_s,
+                   model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, model.layers[il].ffn_gate_s,
+                   model.layers[il].ffn_down, model.layers[il].ffn_down_b, model.layers[il].ffn_down_s,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
@@ -132,7 +135,11 @@ llm_build_llama::llm_build_llama(const llama_model & model, const llm_gra
                    LLM_FFN_SILU, true,
                    hparams.expert_weights_scale,
                    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
-                   il);
+                   il,
+                   nullptr, nullptr,
+                   model.layers[il].ffn_up_exps_s,
+                   model.layers[il].ffn_gate_exps_s,
+                   model.layers[il].ffn_down_exps_s);
            cb(cur, "ffn_moe_out", il);
        }
 
        cur = ggml_add(ctx0, cur, ffn_inp);
diff --git a/src/models/models.h b/src/models/models.h
index cf9ba04e7..a86b2b1eb 100644
--- a/src/models/models.h
+++ b/src/models/models.h
@@ -44,6 +44,26 @@ struct llm_build_delta_net_base : public llm_graph_context {
        ggml_tensor * b,
        ggml_tensor * s,
        int il);
+
+    // use the ggml_gated_delta_net fused operator
+    std::pair<ggml_tensor *, ggml_tensor *> build_delta_net_fused(
+        ggml_tensor * q,
+        ggml_tensor * k,
+        ggml_tensor * v,
+        ggml_tensor * g,
+        ggml_tensor * b,
+        ggml_tensor * s,
+        int il);
+
+    // choose one of the implementations above based on the number of tokens
+    std::pair<ggml_tensor *, ggml_tensor *> build_delta_net(
+        ggml_tensor * q,
+        ggml_tensor * k,
+        ggml_tensor * v,
+        ggml_tensor * g,
+        ggml_tensor * b,
+        ggml_tensor * s,
+        int il);
 };
 
 struct llm_build_rwkv6_base : public llm_graph_context {
diff --git a/src/models/qwen3.cpp b/src/models/qwen3.cpp
index be4811aba..520816684 100644
--- a/src/models/qwen3.cpp
+++ b/src/models/qwen3.cpp
@@ -30,13 +30,13 @@ llm_build_qwen3::llm_build_qwen3(const llama_model & model, const llm_graph_para
        // self-attention
        {
            // compute Q and K and RoPE them
-           ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
+           ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur, model.layers[il].wq_s);
            cb(Qcur, "Qcur", il);
 
-           ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
+           ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur, model.layers[il].wk_s);
            cb(Kcur, "Kcur", il);
 
-           ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
+           ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur, model.layers[il].wv_s);
            cb(Vcur, "Vcur", il);
 
            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
@@ -68,6 +68,9 @@ llm_build_qwen3::llm_build_qwen3(const llama_model & model, const llm_graph_para
            cur = build_attn(inp_attn,
                    model.layers[il].wo, model.layers[il].bo,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
+           if (model.layers[il].wo_s) {
+               cur = ggml_mul(ctx0, cur, model.layers[il].wo_s);
+           }
        }
 
        if (il == n_layer - 1 && inp_out_ids) {
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
@@ -83,9 +86,9 @@ llm_build_qwen3::llm_build_qwen3(const llama_model & model, const llm_graph_para
            cb(cur, "ffn_norm", il);
 
            cur = build_ffn(cur,
-                   model.layers[il].ffn_up,   NULL, NULL,
-                   model.layers[il].ffn_gate, NULL, NULL,
-                   model.layers[il].ffn_down, NULL, NULL,
+                   model.layers[il].ffn_up,   NULL, model.layers[il].ffn_up_s,
+                   model.layers[il].ffn_gate, NULL, model.layers[il].ffn_gate_s,
+                   model.layers[il].ffn_down, NULL, model.layers[il].ffn_down_s,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
diff --git a/src/models/qwen35.cpp b/src/models/qwen35.cpp
index ba096a5a7..e12dad700 100644
--- a/src/models/qwen35.cpp
+++ b/src/models/qwen35.cpp
@@ -321,9 +321,9 @@ ggml_tensor * llm_build_qwen35::build_layer_attn_linear(
    //v_conv = ggml_cont_4d(ctx0, v_conv, head_v_dim, num_v_heads, n_seq_tokens, n_seqs);
 
    // if head keys and value keys are different, repeat to force tensors into matching shapes
-    if (num_k_heads != num_v_heads) {
+    // note: the explicit repeat is needed only when not using the fused GDN
+    if (num_k_heads != num_v_heads && (!cparams.fused_gdn_ar || !cparams.fused_gdn_ch)) {
        GGML_ASSERT(num_v_heads % num_k_heads == 0);
-        // TODO: try to avoid these explicit repeats by utilizing op broadcast
        q_conv = ggml_repeat_4d(ctx0, q_conv, head_k_dim, num_v_heads, n_seq_tokens, n_seqs);
        k_conv = ggml_repeat_4d(ctx0, k_conv, head_k_dim, num_v_heads, n_seq_tokens, n_seqs);
    }
@@ -332,12 +332,8 @@ ggml_tensor * llm_build_qwen35::build_layer_attn_linear(
    cb(k_conv, "k_conv_predelta", il);
    cb(v_conv, "v_conv_predelta", il);
 
-    std::pair<ggml_tensor *, ggml_tensor *> attn_out;
-    if (n_seq_tokens == 1) {
-        attn_out = build_delta_net_autoregressive(q_conv, k_conv, v_conv, gate, beta, state, il);
-    } else {
-        attn_out = build_delta_net_chunking(q_conv, k_conv, v_conv, gate, beta, state, il);
-    }
+    auto attn_out = build_delta_net(q_conv, k_conv, v_conv, gate, beta, state, il);
+
    ggml_tensor * output    = attn_out.first;
    ggml_tensor * new_state = attn_out.second;
    cb(output, "attn_output", il);
diff --git a/src/models/qwen35moe.cpp b/src/models/qwen35moe.cpp
index fe382286e..8d07c7ed2 100644
--- a/src/models/qwen35moe.cpp
+++ b/src/models/qwen35moe.cpp
@@ -321,9 +321,9 @@ ggml_tensor * llm_build_qwen35moe::build_layer_attn_linear(
    //v_conv = ggml_cont_4d(ctx0, v_conv, head_v_dim, num_v_heads, n_seq_tokens, n_seqs);
 
    // if head keys and value keys are different, repeat to force tensors into matching shapes
-    if (num_k_heads != num_v_heads) {
+    // note: the explicit repeat is needed only when not using the fused GDN
+    if (num_k_heads != num_v_heads && (!cparams.fused_gdn_ar || !cparams.fused_gdn_ch)) {
        GGML_ASSERT(num_v_heads % num_k_heads == 0);
-        // TODO: try to avoid these explicit repeats by utilizing op broadcast
        q_conv = ggml_repeat_4d(ctx0, q_conv, head_k_dim, num_v_heads, n_seq_tokens, n_seqs);
        k_conv = ggml_repeat_4d(ctx0, k_conv, head_k_dim, num_v_heads, n_seq_tokens, n_seqs);
    }
@@ -332,12 +332,8 @@ ggml_tensor * llm_build_qwen35moe::build_layer_attn_linear(
    cb(k_conv, "k_conv_predelta", il);
    cb(v_conv, "v_conv_predelta", il);
 
-    std::pair<ggml_tensor *, ggml_tensor *> attn_out;
-    if (n_seq_tokens == 1) {
-        attn_out = build_delta_net_autoregressive(q_conv, k_conv, v_conv, gate, beta, state, il);
-    } else {
-        attn_out = build_delta_net_chunking(q_conv, k_conv, v_conv, gate, beta, state, il);
-    }
+    auto attn_out = build_delta_net(q_conv, k_conv, v_conv, gate, beta, state, il);
+
    ggml_tensor * output    = attn_out.first;
    ggml_tensor * new_state = attn_out.second;
    cb(output, "attn_output", il);
diff --git a/src/models/qwen3moe.cpp b/src/models/qwen3moe.cpp
index 5912a7158..dba46618f 100644
--- a/src/models/qwen3moe.cpp
+++ b/src/models/qwen3moe.cpp
@@ -30,13 +30,13 @@ llm_build_qwen3moe::llm_build_qwen3moe(const llama_model & model, const llm_grap
        // self_attention
        {
            // compute Q and K and RoPE them
-           ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
+           ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur, model.layers[il].wq_s);
            cb(Qcur, "Qcur", il);
 
-           ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
+           ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur, model.layers[il].wk_s);
            cb(Kcur, "Kcur", il);
 
-           ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
+           ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur, model.layers[il].wv_s);
            cb(Vcur, "Vcur", il);
 
            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
@@ -68,6 +68,9 @@ llm_build_qwen3moe::llm_build_qwen3moe(const llama_model & model, const llm_grap
            cur = build_attn(inp_attn,
                    model.layers[il].wo, model.layers[il].bo,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
+           if (model.layers[il].wo_s) {
+               cur = ggml_mul(ctx0, cur, model.layers[il].wo_s);
+           }
        }
 
        if (il == n_layer - 1 && inp_out_ids) {
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
@@ -93,7 +96,11 @@ llm_build_qwen3moe::llm_build_qwen3moe(const llama_model & model, const llm_grap
                    LLM_FFN_SILU, true,
                    hparams.expert_weights_scale,
                    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
-                   il);
+                   il,
+                   nullptr, nullptr,
+                   model.layers[il].ffn_up_exps_s,
+                   model.layers[il].ffn_gate_exps_s,
+                   model.layers[il].ffn_down_exps_s);
            cb(moe_out, "ffn_moe_out", il);
 
            cur = moe_out;
diff --git a/src/models/qwen3next.cpp b/src/models/qwen3next.cpp
index 364df27d8..bdc64f983 100644
--- a/src/models/qwen3next.cpp
+++ b/src/models/qwen3next.cpp
@@ -407,6 +407,7 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
    //v_conv = ggml_cont_4d(ctx0, v_conv, head_v_dim, num_v_heads, n_seq_tokens, n_seqs);
 
    // if head keys and value keys are different, repeat to force tensors into matching shapes
+    // TODO: avoid repeats for fused GDN, needs broadcast configuration for GDN op [TAG_GGML_GDN_BCAST]
    if (num_k_heads != num_v_heads) {
        GGML_ASSERT(num_v_heads % num_k_heads == 0);
        int64_t repeat_factor = num_v_heads / num_k_heads;
@@ -432,13 +433,8 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
    cb(k_conv, "k_conv_predelta", il);
    cb(v_conv, "v_conv_predelta", il);
 
-    // Choose between build_delta_net_chunking, build_delta_net_recurrent, and build_delta_net_autoregressive based on n_tokens
-    std::pair<ggml_tensor *, ggml_tensor *> attn_out; // pair of (output, new_state)
-    if (n_seq_tokens == 1) {
-        attn_out = build_delta_net_autoregressive(q_conv, k_conv, v_conv, gate, beta, state, il);
-    } else {
-        attn_out = build_delta_net_chunking(q_conv, k_conv, v_conv, gate, beta, state, il);
-    }
+    auto attn_out = build_delta_net(q_conv, k_conv, v_conv, gate, beta, state, il);
+
    ggml_tensor * output    = attn_out.first;
    ggml_tensor * new_state = attn_out.second;
    cb(output, "attn_output", il);
diff --git a/tools/mtmd/clip-impl.h b/tools/mtmd/clip-impl.h
index d9116e5da..a63f681d8 100644
--- a/tools/mtmd/clip-impl.h
+++ b/tools/mtmd/clip-impl.h
@@ -216,6 +216,7 @@ enum projector_type {
    PROJECTOR_TYPE_GEMMA3,
    PROJECTOR_TYPE_GEMMA3NV,
    PROJECTOR_TYPE_GEMMA3NA,
+    PROJECTOR_TYPE_PHI4,
    PROJECTOR_TYPE_IDEFICS3,
    PROJECTOR_TYPE_PIXTRAL,
    PROJECTOR_TYPE_QWEN25VL,
@@ -253,6 +254,7 @@ static std::map<projector_type, std::string> PROJECTOR_TYPE_NAMES = {
    { PROJECTOR_TYPE_GEMMA3,   "gemma3"},
    { PROJECTOR_TYPE_GEMMA3NV, "gemma3nv"},
    { PROJECTOR_TYPE_GEMMA3NA, "gemma3na"},
+    { PROJECTOR_TYPE_PHI4,     "phi4"},
    { PROJECTOR_TYPE_IDEFICS3, "idefics3"},
    { PROJECTOR_TYPE_PIXTRAL,  "pixtral"},
    { PROJECTOR_TYPE_ULTRAVOX, "ultravox"},
diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp
index 2a765bf75..d88423fdc 100644
--- a/tools/mtmd/clip.cpp
+++ b/tools/mtmd/clip.cpp
@@ -842,6 +842,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
        case PROJECTOR_TYPE_IDEFICS3:
        case PROJECTOR_TYPE_LFM2:
        case PROJECTOR_TYPE_JANUS_PRO:
+        case PROJECTOR_TYPE_PHI4:
            {
                builder = std::make_unique<clip_graph_siglip>(ctx, img);
            } break;
@@ -1218,6 +1219,13 @@ struct clip_model_loader {
                    // ref: https://huggingface.co/LiquidAI/LFM2.5-VL-1.6B/blob/main/processor_config.json
                    hparams.set_limit_image_tokens(64, 256);
                } break;
+            case PROJECTOR_TYPE_PHI4:
+                {
+                    hparams.n_merge = 1;
+                    get_u32(KEY_IMAGE_MIN_PIXELS, hparams.image_min_pixels);
+                    get_u32(KEY_IMAGE_MAX_PIXELS, hparams.image_max_pixels);
+                    hparams.set_warmup_n_tokens(16*16);
+                } break;
            case PROJECTOR_TYPE_PIXTRAL:
            case PROJECTOR_TYPE_LIGHTONOCR:
                {
@@ -1920,6 +1928,13 @@ struct clip_model_loader {
                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"));
                } break;
+            case PROJECTOR_TYPE_PHI4:
+                {
+                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
+                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
+                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
+                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
+                } break;
            case PROJECTOR_TYPE_LFM2A:
                {
                    for (int i : {0, 2, 3, 5, 6}) {
@@ -3361,6 +3376,7 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str
            res_imgs->entries.push_back(std::move(img_f32));
        } break;
+    case PROJECTOR_TYPE_PHI4:
    case PROJECTOR_TYPE_PIXTRAL:
    case PROJECTOR_TYPE_LIGHTONOCR:
        {
@@ -3587,6 +3603,7 @@ int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * im
        case PROJECTOR_TYPE_MLP:
        case PROJECTOR_TYPE_MLP_NORM:
        case PROJECTOR_TYPE_JANUS_PRO:
+        case PROJECTOR_TYPE_PHI4:
            {
                // do nothing
            }
break; @@ -4088,6 +4105,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima case PROJECTOR_TYPE_VOXTRAL: case PROJECTOR_TYPE_MUSIC_FLAMINGO: case PROJECTOR_TYPE_JANUS_PRO: + case PROJECTOR_TYPE_PHI4: case PROJECTOR_TYPE_COGVLM: { // do nothing @@ -4414,6 +4432,7 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) { case PROJECTOR_TYPE_LDPV2: return ctx->model.mm_model_peg_0_b->ne[0]; case PROJECTOR_TYPE_MLP: + case PROJECTOR_TYPE_PHI4: case PROJECTOR_TYPE_PIXTRAL: case PROJECTOR_TYPE_LIGHTONOCR: return ctx->model.mm_2_w->ne[1]; diff --git a/tools/mtmd/models/siglip.cpp b/tools/mtmd/models/siglip.cpp index b866a11c5..75f9b4db4 100644 --- a/tools/mtmd/models/siglip.cpp +++ b/tools/mtmd/models/siglip.cpp @@ -4,7 +4,7 @@ ggml_cgraph * clip_graph_siglip::build() { ggml_tensor * inp = build_inp(); ggml_tensor * learned_pos_embd = model.position_embeddings; - if (proj_type == PROJECTOR_TYPE_LFM2) { + if (proj_type == PROJECTOR_TYPE_LFM2 || proj_type == PROJECTOR_TYPE_PHI4) { learned_pos_embd = resize_position_embeddings(); } @@ -75,6 +75,14 @@ ggml_cgraph * clip_graph_siglip::build() { hparams.ffn_op, -1); + } else if (proj_type == PROJECTOR_TYPE_PHI4) { + cur = build_ffn(cur, + model.mm_0_w, model.mm_0_b, + nullptr, nullptr, + model.mm_2_w, model.mm_2_b, + FFN_GELU, + -1); + } else { GGML_ABORT("SigLIP: Unsupported projector type"); } diff --git a/tools/mtmd/mtmd.cpp b/tools/mtmd/mtmd.cpp index 8ca979c86..ccafb80b2 100644 --- a/tools/mtmd/mtmd.cpp +++ b/tools/mtmd/mtmd.cpp @@ -290,6 +290,9 @@ struct mtmd_context { img_beg = "<|vision_start|>"; img_end = "<|vision_end|>"; + } else if (proj == PROJECTOR_TYPE_PHI4) { + // Phi-4 uses media marker insertion only. Keep image boundary text empty. + } else if (proj == PROJECTOR_TYPE_LLAMA4) { // (more details in mtmd_context constructor) img_beg = "<|image_start|>"; diff --git a/tools/server/public/index.html.gz b/tools/server/public/index.html.gz index 3d0991dde..493058aa0 100644 Binary files a/tools/server/public/index.html.gz and b/tools/server/public/index.html.gz differ diff --git a/tools/server/tests/unit/test_template.py b/tools/server/tests/unit/test_template.py index e5185fcbf..43a356020 100644 --- a/tools/server/tests/unit/test_template.py +++ b/tools/server/tests/unit/test_template.py @@ -11,6 +11,7 @@ sys.path.insert(0, str(path)) import datetime from utils import * +from typing import Literal server: ServerProcess @@ -23,24 +24,24 @@ def create_server(): @pytest.mark.parametrize("tools", [None, [], [TEST_TOOL]]) -@pytest.mark.parametrize("template_name,reasoning_budget,expected_end", [ - ("deepseek-ai-DeepSeek-R1-Distill-Qwen-32B", None, "\n"), - ("deepseek-ai-DeepSeek-R1-Distill-Qwen-32B", -1, "\n"), - ("deepseek-ai-DeepSeek-R1-Distill-Qwen-32B", 0, "\n"), +@pytest.mark.parametrize("template_name,reasoning,expected_end", [ + ("deepseek-ai-DeepSeek-R1-Distill-Qwen-32B", "on", "\n"), + ("deepseek-ai-DeepSeek-R1-Distill-Qwen-32B","auto", "\n"), + ("deepseek-ai-DeepSeek-R1-Distill-Qwen-32B", "off", "\n"), - ("Qwen-Qwen3-0.6B", -1, "<|im_start|>assistant\n"), - ("Qwen-Qwen3-0.6B", 0, "<|im_start|>assistant\n\n\n\n\n"), + ("Qwen-Qwen3-0.6B","auto", "<|im_start|>assistant\n"), + ("Qwen-Qwen3-0.6B", "off", "<|im_start|>assistant\n\n\n\n\n"), - ("Qwen-QwQ-32B", -1, "<|im_start|>assistant\n\n"), - ("Qwen-QwQ-32B", 0, "<|im_start|>assistant\n\n"), + ("Qwen-QwQ-32B","auto", "<|im_start|>assistant\n\n"), + ("Qwen-QwQ-32B", "off", "<|im_start|>assistant\n\n"), - ("CohereForAI-c4ai-command-r7b-12-2024-tool_use", -1, 
"<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"), - ("CohereForAI-c4ai-command-r7b-12-2024-tool_use", 0, "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_THINKING|><|END_THINKING|>"), + ("CohereForAI-c4ai-command-r7b-12-2024-tool_use","auto", "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"), + ("CohereForAI-c4ai-command-r7b-12-2024-tool_use", "off", "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_THINKING|><|END_THINKING|>"), ]) -def test_reasoning_budget(template_name: str, reasoning_budget: int | None, expected_end: str, tools: list[dict]): +def test_reasoning(template_name: str, reasoning: Literal['on', 'off', 'auto'] | None, expected_end: str, tools: list[dict]): global server server.jinja = True - server.reasoning_budget = reasoning_budget + server.reasoning = reasoning server.chat_template_file = f'../../../models/templates/{template_name}.jinja' server.start() diff --git a/tools/server/tests/utils.py b/tools/server/tests/utils.py index db357d876..c6fe11261 100644 --- a/tools/server/tests/utils.py +++ b/tools/server/tests/utils.py @@ -95,7 +95,7 @@ class ServerProcess: no_webui: bool | None = None jinja: bool | None = None reasoning_format: Literal['deepseek', 'none', 'nothink'] | None = None - reasoning_budget: int | None = None + reasoning: Literal['on', 'off', 'auto'] | None = None chat_template: str | None = None chat_template_file: str | None = None server_path: str | None = None @@ -225,8 +225,8 @@ class ServerProcess: server_args.append("--no-jinja") if self.reasoning_format is not None: server_args.extend(("--reasoning-format", self.reasoning_format)) - if self.reasoning_budget is not None: - server_args.extend(("--reasoning-budget", self.reasoning_budget)) + if self.reasoning is not None: + server_args.extend(("--reasoning", self.reasoning)) if self.chat_template: server_args.extend(["--chat-template", self.chat_template]) if self.chat_template_file: diff --git a/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActions.svelte b/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActions.svelte index 850177693..2ad830e18 100644 --- a/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActions.svelte +++ b/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActions.svelte @@ -62,15 +62,12 @@ chatStore.getConversationModel(activeMessages() as DatabaseMessage[]) ); - let previousConversationModel: string | null = null; - $effect(() => { - if (conversationModel && conversationModel !== previousConversationModel) { - previousConversationModel = conversationModel; - - if (!isRouter || modelsStore.isModelLoaded(conversationModel)) { - modelsStore.selectModelByName(conversationModel); - } + if (conversationModel) { + modelsStore.selectModelByName(conversationModel); + } else if (isRouter && modelsStore.loadedModelIds.length > 0) { + const first = modelOptions().find((m) => modelsStore.loadedModelIds.includes(m.model)); + if (first) modelsStore.selectModelById(first.id); } }); diff --git a/vendor/cpp-httplib/httplib.cpp b/vendor/cpp-httplib/httplib.cpp index c8f88d87d..71a5f0056 100644 --- a/vendor/cpp-httplib/httplib.cpp +++ b/vendor/cpp-httplib/httplib.cpp @@ -4424,7 +4424,8 @@ get_range_offset_and_length(Range r, size_t content_length) { assert(r.first <= r.second && r.second < static_cast(content_length)); (void)(content_length); - return std::make_pair(r.first, static_cast(r.second - r.first) + 1); + return std::make_pair(static_cast(r.first), + static_cast(r.second - 
diff --git a/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActions.svelte b/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActions.svelte
index 850177693..2ad830e18 100644
--- a/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActions.svelte
+++ b/tools/server/webui/src/lib/components/app/chat/ChatForm/ChatFormActions/ChatFormActions.svelte
@@ -62,15 +62,12 @@
         chatStore.getConversationModel(activeMessages() as DatabaseMessage[])
     );

-    let previousConversationModel: string | null = null;
-
     $effect(() => {
-        if (conversationModel && conversationModel !== previousConversationModel) {
-            previousConversationModel = conversationModel;
-
-            if (!isRouter || modelsStore.isModelLoaded(conversationModel)) {
-                modelsStore.selectModelByName(conversationModel);
-            }
+        if (conversationModel) {
+            modelsStore.selectModelByName(conversationModel);
+        } else if (isRouter && modelsStore.loadedModelIds.length > 0) {
+            const first = modelOptions().find((m) => modelsStore.loadedModelIds.includes(m.model));
+            if (first) modelsStore.selectModelById(first.id);
         }
     });

diff --git a/vendor/cpp-httplib/httplib.cpp b/vendor/cpp-httplib/httplib.cpp
index c8f88d87d..71a5f0056 100644
--- a/vendor/cpp-httplib/httplib.cpp
+++ b/vendor/cpp-httplib/httplib.cpp
@@ -4424,7 +4424,8 @@ get_range_offset_and_length(Range r, size_t content_length) {
   assert(r.first <= r.second && r.second < static_cast<ssize_t>(content_length));
   (void)(content_length);

-  return std::make_pair(r.first, static_cast<size_t>(r.second - r.first) + 1);
+  return std::make_pair(static_cast<size_t>(r.first),
+                        static_cast<size_t>(r.second - r.first) + 1);
 }

 std::string make_content_range_header_field(
@@ -8616,11 +8617,17 @@ ClientImpl::open_stream(const std::string &method, const std::string &path,
   handle.body_reader_.stream = handle.stream_;
   handle.body_reader_.payload_max_length = payload_max_length_;

-  auto content_length_str = handle.response->get_header_value("Content-Length");
-  if (!content_length_str.empty()) {
+  if (handle.response->has_header("Content-Length")) {
+    bool is_invalid = false;
+    auto content_length = detail::get_header_value_u64(
+        handle.response->headers, "Content-Length", 0, 0, is_invalid);
+    if (is_invalid) {
+      handle.error = Error::Read;
+      handle.response.reset();
+      return handle;
+    }
     handle.body_reader_.has_content_length = true;
-    handle.body_reader_.content_length =
-        static_cast<size_t>(std::stoull(content_length_str));
+    handle.body_reader_.content_length = content_length;
   }

   auto transfer_encoding =
diff --git a/vendor/cpp-httplib/httplib.h b/vendor/cpp-httplib/httplib.h
index ac1908f42..e01b3550b 100644
--- a/vendor/cpp-httplib/httplib.h
+++ b/vendor/cpp-httplib/httplib.h
@@ -8,28 +8,8 @@
 #ifndef CPPHTTPLIB_HTTPLIB_H
 #define CPPHTTPLIB_HTTPLIB_H

-#define CPPHTTPLIB_VERSION "0.37.0"
-#define CPPHTTPLIB_VERSION_NUM "0x002500"
-
-/*
- * Platform compatibility check
- */
-
-#if defined(_WIN32) && !defined(_WIN64)
-#if defined(_MSC_VER)
-#pragma message( \
-    "cpp-httplib doesn't support 32-bit Windows. Please use a 64-bit compiler.")
-#else
-#warning \
-    "cpp-httplib doesn't support 32-bit Windows. Please use a 64-bit compiler."
-#endif
-#elif defined(__SIZEOF_POINTER__) && __SIZEOF_POINTER__ < 8
-#warning \
-    "cpp-httplib doesn't support 32-bit platforms. Please use a 64-bit compiler."
-#elif defined(__SIZEOF_SIZE_T__) && __SIZEOF_SIZE_T__ < 8
-#warning \
-    "cpp-httplib doesn't support platforms where size_t is less than 64 bits."
-#endif
+#define CPPHTTPLIB_VERSION "0.37.1"
+#define CPPHTTPLIB_VERSION_NUM "0x002501"

 #ifdef _WIN32
 #if defined(_WIN32_WINNT) && _WIN32_WINNT < 0x0A00
@@ -2797,7 +2777,7 @@ inline size_t get_header_value_u64(const Headers &headers,
   std::advance(it, static_cast<ssize_t>(id));
   if (it != rng.second) {
     if (is_numeric(it->second)) {
-      return std::strtoull(it->second.data(), nullptr, 10);
+      return static_cast<size_t>(std::strtoull(it->second.data(), nullptr, 10));
     } else {
       is_invalid_value = true;
     }
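For context on the open_stream change above: std::stoull throws on non-numeric input and its result can be silently narrowed, so the streaming client now routes through the validating header accessor and surfaces Error::Read instead. A standalone sketch of the same defensive pattern using strtoull and errno (not httplib's exact implementation):

    #include <cerrno>
    #include <cstdint>
    #include <cstdlib>
    #include <string>

    // Parse a Content-Length value, rejecting non-numeric input, trailing
    // garbage, and out-of-range values instead of throwing or wrapping.
    static bool parse_content_length(const std::string & s, uint64_t & out) {
        if (s.empty() || s[0] < '0' || s[0] > '9') return false; // no sign/space
        errno = 0;
        char * end = nullptr;
        const unsigned long long v = std::strtoull(s.c_str(), &end, 10);
        if (errno == ERANGE || end == nullptr || *end != '\0') return false;
        out = static_cast<uint64_t>(v);
        return true;
    }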