diff --git a/common/common.cpp b/common/common.cpp index 992c883e6..574b2037f 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -1,5 +1,7 @@ #include "common.h" #include "build-info.h" +// Change JSON_ASSERT from assert() to GGML_ASSERT: +#define JSON_ASSERT GGML_ASSERT #include "json.hpp" #include "json-schema-to-grammar.h" #include "llama.h" @@ -912,6 +914,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa params.instruct = true; return true; } + if (arg == "-cnv" || arg == "--conversation") { + params.conversation = true; + return true; + } if (arg == "-cml" || arg == "--chatml") { params.chatml = true; return true; @@ -1418,6 +1424,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" --version show version and build info\n"); printf(" -i, --interactive run in interactive mode\n"); printf(" --interactive-first run in interactive mode and wait for input right away\n"); + printf(" -cnv, --conversation run in conversation mode (does not print special tokens and suffix/prefix)\n"); printf(" -ins, --instruct run in instruction mode (use with Alpaca models)\n"); printf(" -cml, --chatml run in chatml mode (use with ChatML-compatible models)\n"); printf(" --multiline-input allows you to write or paste multiple lines without ending each in '\\'\n"); @@ -1965,18 +1972,18 @@ static bool llama_download_file(const std::string & url, const std::string & pat try { metadata_in >> metadata; fprintf(stderr, "%s: previous metadata file found %s: %s\n", __func__, metadata_path.c_str(), metadata.dump().c_str()); - if (metadata.contains("url") && metadata["url"].is_string()) { - auto previous_url = metadata["url"].get(); + if (metadata.contains("url") && metadata.at("url").is_string()) { + auto previous_url = metadata.at("url").get(); if (previous_url != url) { fprintf(stderr, "%s: Model URL mismatch: %s != %s\n", __func__, url.c_str(), previous_url.c_str()); return false; } } - if (metadata.contains("etag") && metadata["etag"].is_string()) { - etag = metadata["etag"]; + if (metadata.contains("etag") && metadata.at("etag").is_string()) { + etag = metadata.at("etag"); } - if (metadata.contains("lastModified") && metadata["lastModified"].is_string()) { - last_modified = metadata["lastModified"]; + if (metadata.contains("lastModified") && metadata.at("lastModified").is_string()) { + last_modified = metadata.at("lastModified"); } } catch (const nlohmann::json::exception & e) { fprintf(stderr, "%s: error reading metadata file %s: %s\n", __func__, metadata_path.c_str(), e.what()); diff --git a/common/common.h b/common/common.h index f0b8c3f30..940ee0ec5 100644 --- a/common/common.h +++ b/common/common.h @@ -156,6 +156,7 @@ struct gpt_params { bool random_prompt = false; // do not randomize prompt if none provided bool use_color = false; // use color to distinguish generations and inputs bool interactive = false; // interactive mode + bool conversation = false; // conversation mode (does not print special tokens and suffix/prefix) bool chatml = false; // chatml mode (used for models trained on chatml syntax) bool prompt_cache_all = false; // save user input and generations to prompt cache bool prompt_cache_ro = false; // open the prompt cache read-only and do not update it diff --git a/common/json-schema-to-grammar.h b/common/json-schema-to-grammar.h index e1abed303..41623b346 100644 --- a/common/json-schema-to-grammar.h +++ b/common/json-schema-to-grammar.h @@ -1,4 +1,8 @@ #pragma once + +#include "ggml.h" +// Change JSON_ASSERT 
from assert() to GGML_ASSERT: +#define JSON_ASSERT GGML_ASSERT #include "json.hpp" std::string json_schema_to_grammar(const nlohmann::ordered_json& schema); diff --git a/convert-hf-to-gguf-update.py b/convert-hf-to-gguf-update.py index a3fe67ee7..a26f45a5f 100755 --- a/convert-hf-to-gguf-update.py +++ b/convert-hf-to-gguf-update.py @@ -67,7 +67,9 @@ models = [ {"name": "gpt-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", }, {"name": "refact", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/smallcloudai/Refact-1_6-base", }, {"name": "command-r", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/CohereForAI/c4ai-command-r-v01", }, + {"name": "qwen2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Qwen/Qwen1.5-7B", }, {"name": "olmo", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/allenai/OLMo-1.7-7B-hf", }, + {"name": "dbrx", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/databricks/dbrx-base", }, ] # make directory "models/tokenizers" if it doesn't exist diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index f65d9320e..1dc18b2a5 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -9,11 +9,10 @@ import json import os import re import sys -from abc import ABC, abstractmethod from enum import IntEnum from pathlib import Path from hashlib import sha256 -from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterator, Sequence, TypeVar, cast +from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Sequence, TypeVar, cast, overload import numpy as np import torch @@ -25,7 +24,7 @@ if 'NO_LOCAL_GGUF' not in os.environ: sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) import gguf -from convert import LlamaHfVocab, permute +from convert import LlamaHfVocab logger = logging.getLogger("hf-to-gguf") @@ -44,29 +43,55 @@ class SentencePieceTokenTypes(IntEnum): AnyModel = TypeVar("AnyModel", bound="type[Model]") -class Model(ABC): +class Model: _model_classes: dict[str, type[Model]] = {} - def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: bool, use_temp_file: bool): + dir_model: Path + ftype: int + fname_out: Path + is_big_endian: bool + endianess: gguf.GGUFEndian + use_temp_file: bool + lazy: bool + part_names: list[str] + is_safetensors: bool + hparams: dict[str, Any] + gguf_writer: gguf.GGUFWriter + block_count: int + tensor_map: gguf.TensorNameMap + tensor_names: set[str] | None + + # subclasses should define this! 
+ model_arch: gguf.MODEL_ARCH + + def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: bool, use_temp_file: bool, eager: bool): + if self.__class__ == Model: + raise TypeError(f"{self.__class__.__name__!r} should not be directly instantiated") self.dir_model = dir_model self.ftype = ftype self.fname_out = fname_out self.is_big_endian = is_big_endian self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE self.use_temp_file = use_temp_file - self.is_safetensors = self._is_model_safetensors() - self.num_parts = Model.count_model_parts(self.dir_model, ".safetensors" if self.is_safetensors else ".bin") - self.part_names = self._get_part_names() + self.lazy = not eager + self.part_names = Model.get_model_part_names(self.dir_model, ".safetensors") + self.is_safetensors = len(self.part_names) > 0 + if not self.is_safetensors: + self.part_names = Model.get_model_part_names(self.dir_model, ".bin") self.hparams = Model.load_hparams(self.dir_model) self.gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file) self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer"]) + self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count) + self.tensor_names = None - @property - @abstractmethod - def model_arch(self) -> gguf.MODEL_ARCH: - pass + @classmethod + def __init_subclass__(cls): + # can't use an abstract property, because overriding it without type errors + # would require using decorated functions instead of simply defining the property + if "model_arch" not in cls.__dict__: + raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}") - def find_hparam(self, keys: Sequence[str], optional: bool = False) -> Any: + def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any: key = next((k for k in keys if k in self.hparams), None) if key is not None: return self.hparams[key] @@ -78,6 +103,22 @@ class Model(ABC): self._set_vocab_gpt2() def get_tensors(self) -> Iterator[tuple[str, Tensor]]: + tensor_names_from_parts: set[str] = set() + + if len(self.part_names) > 1: + self.tensor_names = set() + index_name = "model.safetensors" if self.is_safetensors else "pytorch_model.bin" + index_name += ".index.json" + logger.info(f"gguf: loading model weight map from '{index_name}'") + with open(self.dir_model / index_name, "r", encoding="utf-8") as f: + index: dict[str, Any] = json.load(f) + weight_map = index.get("weight_map") + if weight_map is None or not isinstance(weight_map, dict): + raise ValueError(f"Can't load 'weight_map' from {index_name!r}") + self.tensor_names.update(weight_map.keys()) + else: + self.tensor_names = tensor_names_from_parts + for part_name in self.part_names: logger.info(f"gguf: loading model part '{part_name}'") ctx: ContextManager[Any] @@ -88,10 +129,33 @@ class Model(ABC): ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True)) with ctx as model_part: + tensor_names_from_parts.update(model_part.keys()) + for name in model_part.keys(): data = model_part.get_tensor(name) if self.is_safetensors else model_part[name] + if self.lazy: + data = LazyTorchTensor.from_eager(data) yield name, data + # only verify tensor name presence; it doesn't matter if they are not in the right files + if len(sym_diff := tensor_names_from_parts.symmetric_difference(self.tensor_names)) > 0: + raise ValueError(f"Mismatch between weight map and 
model parts for tensor names: {sym_diff}") + + def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str: + name: str = gguf.TENSOR_NAMES[key] + if key not in gguf.MODEL_TENSORS[self.model_arch]: + raise ValueError(f"Missing {key!r} for MODEL_TENSORS of {self.model_arch!r}") + if "{bid}" in name: + assert bid is not None + name = name.format(bid=bid) + return name + suffix + + def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str: + new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes) + if new_name is None: + raise ValueError(f"Can not map tensor {name!r}") + return new_name + def set_gguf_parameters(self): self.gguf_writer.add_name(self.dir_model.name) self.gguf_writer.add_block_count(self.block_count) @@ -135,12 +199,27 @@ class Model(ABC): self.gguf_writer.add_file_type(self.ftype) logger.info(f"gguf: file type = {self.ftype}") + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused + + return [(self.map_tensor_name(name), data_torch)] + + def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: + del name, new_name, bid, n_dims # unused + + return False + + def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: + del name, new_name, bid, n_dims # unused + + return False + def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,") + for name, data_torch in self.get_tensors(): # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")): + if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")): continue old_dtype = data_torch.dtype @@ -149,37 +228,52 @@ class Model(ABC): if data_torch.dtype not in (torch.float16, torch.float32): data_torch = data_torch.to(torch.float32) - data = data_torch.squeeze().numpy() + # use the first number-like part of the tensor name as the block id + bid = None + for part in name.split("."): + if part.isdecimal(): + bid = int(part) + break - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") + for new_name, data in ((n, d.squeeze().numpy()) for n, d in self.modify_tensors(data_torch, name, bid)): + data: np.ndarray = data # type hint + n_dims = len(data.shape) + data_dtype = data.dtype - n_dims = len(data.shape) - data_dtype = data.dtype + # if f32 desired, convert any float16 to float32 + if self.ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) + # when both are True, f32 should win + extra_f32 = self.extra_f32_tensors(name, new_name, bid, n_dims) + extra_f16 = self.extra_f16_tensors(name, new_name, bid, n_dims) - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")): - data = data.astype(np.float32) + # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors + extra_f32 = extra_f32 or n_dims == 1 or new_name.endswith("_norm.weight") - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) + # if f16 desired, convert any float32 2-dim weight tensors to float16 + extra_f16 = extra_f16 or (name.endswith(".weight") and n_dims >= 2) - logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + # when both extra_f32 and extra_f16 are False, convert to float32 by default + if self.ftype == 1 and data_dtype == np.float16 and (extra_f32 or not extra_f16): + data = data.astype(np.float32) - self.gguf_writer.add_tensor(new_name, data) + if self.ftype == 1 and data_dtype == np.float32 and extra_f16 and not extra_f32: + data = data.astype(np.float16) + + # reverse shape to make it similar to the internal ggml dimension order + shape_str = f"{{{', '.join(str(n) for n in reversed(data.shape))}}}" + + # n_dims is implicit in the shape + logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data.dtype}, shape = {shape_str}") + + self.gguf_writer.add_tensor(new_name, data) def write(self): self.write_tensors() self.gguf_writer.write_header_to_file() self.gguf_writer.write_kv_data_to_file() - self.gguf_writer.write_tensors_to_file() + self.gguf_writer.write_tensors_to_file(progress=True) self.gguf_writer.close() def write_vocab(self): @@ -188,16 +282,18 @@ class Model(ABC): self.gguf_writer.close() @staticmethod - def count_model_parts(dir_model: Path, prefix: str) -> int: - num_parts = 0 + def get_model_part_names(dir_model: Path, suffix: str) -> list[str]: + part_names: list[str] = [] for filename in os.listdir(dir_model): - if filename.endswith(prefix): - num_parts += 1 + if filename.endswith(suffix): + part_names.append(filename) - return num_parts + part_names.sort() + + return part_names @staticmethod - def load_hparams(dir_model): + def load_hparams(dir_model: Path): with open(dir_model / "config.json", "r", encoding="utf-8") as f: return json.load(f) @@ -205,32 +301,19 @@ class Model(ABC): def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]: assert names - def func(modelcls: type[Model]): + def func(modelcls: AnyModel) -> AnyModel: for name in names: cls._model_classes[name] = modelcls return modelcls return func @classmethod - def from_model_architecture(cls, arch): + def from_model_architecture(cls, arch: str) -> type[Model]: try: return cls._model_classes[arch] except KeyError: raise NotImplementedError(f'Architecture {arch!r} not supported!') from None - def _is_model_safetensors(self) -> bool: - return Model.count_model_parts(self.dir_model, ".safetensors") > 0 - - def _get_part_names(self): - if self.is_safetensors: - if self.num_parts == 1: # there's only one .safetensors file - return ("model.safetensors",) - return (f"model-{n:05}-of-{self.num_parts:05}.safetensors" for n in range(1, self.num_parts + 1)) - - if self.num_parts == 1: # there's only one .bin file - return ("pytorch_model.bin",) - return (f"pytorch_model-{n:05}-of-{self.num_parts:05}.bin" for n in range(1, self.num_parts + 1)) - # used for GPT-2 BPE and WordPiece vocabs def get_vocab_base(self) -> tuple[list[str], 
list[int], str]: tokens: list[str] = [] @@ -314,9 +397,15 @@ class Model(ABC): if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8": # ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01 res = "command-r" + if chkhsh == "e636dc30a262dcc0d8c323492e32ae2b70728f4df7dfe9737d9f920a282b8aea": + # ref: https://huggingface.co/Qwen/Qwen1.5-7B + res = "qwen2" if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166": # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf res = "olmo" + if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e": + # ref: https://huggingface.co/databricks/dbrx-instruct + res = "dbrx" if res is None: logger.warning("\n") @@ -414,22 +503,24 @@ class Model(ABC): if not tokenizer_path.is_file(): raise FileNotFoundError(f"File not found: {tokenizer_path}") - tokenizer = SentencePieceProcessor(str(tokenizer_path)) + tokenizer = SentencePieceProcessor() + tokenizer.LoadFromFile(str(tokenizer_path)) + vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) for token_id in range(tokenizer.vocab_size()): - piece = tokenizer.id_to_piece(token_id) + piece = tokenizer.IdToPiece(token_id) text = piece.encode("utf-8") - score = tokenizer.get_score(token_id) + score = tokenizer.GetScore(token_id) toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.is_unknown(token_id): + if tokenizer.IsUnknown(token_id): toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.is_control(token_id): + elif tokenizer.IsControl(token_id): toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.is_unused(token_id): + elif tokenizer.IsUnused(token_id): toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.is_byte(token_id): + elif tokenizer.IsByte(token_id): toktype = SentencePieceTokenTypes.BYTE tokens.append(text) @@ -452,7 +543,7 @@ class Model(ABC): pad_count = vocab_size - len(tokens) logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]") for i in range(1, pad_count + 1): - tokens.append(f"[PAD{i}]") + tokens.append(bytes(f"[PAD{i}]", encoding="utf-8")) scores.append(-1000.0) toktypes.append(SentencePieceTokenTypes.UNUSED) @@ -527,81 +618,52 @@ class BloomModel(Model): self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) self.gguf_writer.add_file_type(self.ftype) - def write_tensors(self): - block_count = self.hparams["n_layer"] - tensors = dict(self.get_tensors()) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - has_lm_head = True + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused + n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) - for name, data_torch in tensors.items(): - if "lm_head.weight" not in tensors.keys() and "output.weight" not in tensors.keys(): - has_lm_head = False + name = re.sub(r'transformer\.', '', name) - name = re.sub(r'transformer\.', '', name) + tensors: list[tuple[str, Tensor]] = [] - old_dtype = data_torch.dtype + if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name): + # Map bloom-style qkv_linear to gpt-style qkv_linear + # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa + # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa + qkv_weights = data_torch.reshape((n_head, 
3, n_embed // n_head, n_embed)) + data_torch = torch.cat( + ( + qkv_weights[:, 0, :, :].reshape((-1, n_embed)), + qkv_weights[:, 1, :, :].reshape((-1, n_embed)), + qkv_weights[:, 2, :, :].reshape((-1, n_embed)), + ), + dim=0, + ) + logger.info("re-format attention.linear_qkv.weight") + elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name): + qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head)) + data_torch = torch.cat( + ( + qkv_bias[:, 0, :].reshape((n_embed,)), + qkv_bias[:, 1, :].reshape((n_embed,)), + qkv_bias[:, 2, :].reshape((n_embed,)), + ), + dim=0, + ) + logger.info("re-format attention.linear_qkv.bias") - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) + tensors.append((self.map_tensor_name(name), data_torch)) - data = data_torch.squeeze().numpy() + if name == "word_embeddings.weight": + assert self.tensor_names is not None - if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name): - # Map bloom-style qkv_linear to gpt-style qkv_linear - # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa - # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa - qkv_weights = data.reshape((n_head, 3, n_embed // n_head, n_embed)) - data = np.concatenate( - ( - qkv_weights[:, 0, :, :].reshape((-1, n_embed)), - qkv_weights[:, 1, :, :].reshape((-1, n_embed)), - qkv_weights[:, 2, :, :].reshape((-1, n_embed)), - ), - axis=0, - ) - logger.info("re-format attention.linear_qkv.weight") - elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name): - qkv_bias = data.reshape((n_head, 3, n_embed // n_head)) - data = np.concatenate( - ( - qkv_bias[:, 0, :].reshape((n_embed,)), - qkv_bias[:, 1, :].reshape((n_embed,)), - qkv_bias[:, 2, :].reshape((n_embed,)), - ), - axis=0, - ) - logger.info("re-format attention.linear_qkv.bias") + # TODO: tie them at runtime, don't duplicate in the model file + if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")): + tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch)) - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"=> {new_name}, shape = {data.shape}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) - - if not has_lm_head and name == "word_embeddings.weight": - self.gguf_writer.add_tensor("output.weight", data) - logger.info(name, f"=> output.weight, shape = {data.shape}, {old_dtype} --> {data.dtype}") + return tensors @Model.register("MPTForCausalLM") @@ -637,50 +699,16 @@ class MPTModel(Model): else: self.gguf_writer.add_max_alibi_bias(0.0) - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers")) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - for name, data_torch in self.get_tensors(): - # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")): - continue + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - old_dtype = data_torch.dtype + if "scales" in name: + new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales")) + new_name = new_name.replace("scales", "act.scales") + else: + new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias")) - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - # map tensor names - if "scales" in name: - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias", ".scales")) - if new_name is not None: - new_name = new_name.replace("scales", "act.scales") - else: - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) + return [(new_name, data_torch)] @Model.register("OrionForCausalLM") @@ -720,48 +748,6 @@ class OrionModel(Model): # ref: https://huggingface.co/OrionStarAI/Orion-14B-Chat/blob/276a17221ce42beb45f66fac657a41540e71f4f5/modeling_orion.py#L570-L571 self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"]) - def write_tensors(self): - # Collect tensors from generator object - model_kv = dict(self.get_tensors()) - block_count = self.hparams["num_hidden_layers"] - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - - for name, data_torch in model_kv.items(): - # we don't need these - if name.endswith(".rotary_emb.inv_freq"): - continue - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - self.gguf_writer.add_tensor(new_name, data) - @Model.register("BaichuanForCausalLM", "BaiChuanForCausalLM") class BaichuanModel(Model): @@ -803,60 +789,26 @@ class BaichuanModel(Model): self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) - def write_tensors(self): - # Collect tensors from generator object - model_kv = dict(self.get_tensors()) - block_count = self.hparams["num_hidden_layers"] + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: head_count = self.hparams["num_attention_heads"] - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) head_count_kv = self.hparams.get("num_key_value_heads", head_count) - for i in range(block_count): - if (w := model_kv.get(f"model.layers.{i}.self_attn.W_pack.weight")) is not None: - logger.info(f"Unpacking and permuting layer {i}") - model_kv[f"model.layers.{i}.self_attn.q_proj.weight"] = \ - self._reverse_hf_permute_part(w, 0, head_count, head_count) - model_kv[f"model.layers.{i}.self_attn.k_proj.weight"] = \ - self._reverse_hf_permute_part(w, 1, head_count, head_count_kv) - model_kv[f"model.layers.{i}.self_attn.v_proj.weight"] = \ - self._reverse_hf_part(w, 2) - del model_kv[f"model.layers.{i}.self_attn.W_pack.weight"] + tensors: list[tuple[str, Tensor]] = [] - for name, data_torch in model_kv.items(): - # we don't need these - if name.endswith(".rotary_emb.inv_freq"): - continue + if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight": + logger.info(f"Unpacking and permuting layer {bid}") + tensors = [ + (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), + self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)), + (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), + self._reverse_hf_permute_part(data_torch, 1, head_count, head_count_kv)), + (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), + self._reverse_hf_part(data_torch, 2)), + ] + else: + tensors = [(self.map_tensor_name(name), data_torch)] - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - self.gguf_writer.add_tensor(new_name, data) + return tensors def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor: if n_kv_head is not None and n_head != n_kv_head: @@ -888,7 +840,7 @@ class XverseModel(Model): dir_model = self.dir_model hparams = self.hparams - tokens: list[bytearray] = [] + tokens: list[bytes] = [] toktypes: list[int] = [] from transformers import AutoTokenizer @@ -896,7 +848,7 @@ class XverseModel(Model): vocab_size = hparams.get("vocab_size", len(tokenizer.vocab)) assert max(tokenizer.vocab.values()) < vocab_size - reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()} + reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()} added_vocab = tokenizer.get_added_vocab() for token_id in range(vocab_size): @@ -959,55 +911,19 @@ class XverseModel(Model): self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) - def write_tensors(self): - # Collect tensors from generator object - model_kv = dict(self.get_tensors()) - block_count = self.hparams["num_hidden_layers"] + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused + head_count = self.hparams["num_attention_heads"] - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) head_count_kv = self.hparams.get("num_key_value_heads", head_count) - for name, data_torch in model_kv.items(): - # we don't need these - if name.endswith(".rotary_emb.inv_freq"): - continue + # HF models permute some of the tensors, so we need to undo that + if name.endswith("q_proj.weight"): + data_torch = self._reverse_hf_permute(data_torch, head_count, head_count) + if name.endswith("k_proj.weight"): + data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv) - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - # HF models permute some of the tensors, so we need to undo that - if name.endswith(("q_proj.weight")): - data_torch = self._reverse_hf_permute(data_torch, head_count, head_count) - if name.endswith(("k_proj.weight")): - data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv) - - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - self.gguf_writer.add_tensor(new_name, data) + return [(self.map_tensor_name(name), data_torch)] def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor: if n_kv_head is not None and n_head != n_kv_head: @@ -1048,71 +964,31 @@ class FalconModel(Model): self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) self.gguf_writer.add_file_type(self.ftype) - def write_tensors(self): - block_count = self.hparams.get("num_hidden_layers") - if block_count is None: - block_count = self.hparams["n_layer"] # old name + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - n_head = self.hparams.get("num_attention_heads") - if n_head is None: - n_head = self.hparams["n_head"] # old name + # QKV tensor transform + # The original query_key_value tensor contains n_head_kv "kv groups", + # each consisting of n_head/n_head_kv query weights followed by one key + # and one value weight (shared by all query heads in the kv group). + # This layout makes it a big pain to work with in GGML. + # So we rearrange them here,, so that we have n_head query weights + # followed by n_head_kv key weights followed by n_head_kv value weights, + # in contiguous fashion. + # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py - n_head_kv = self.hparams.get("num_kv_heads") - if n_head_kv is None: - n_head_kv = self.hparams.get("n_head_kv", 1) # old name + if "query_key_value" in name: + n_head = self.find_hparam(["num_attention_heads", "n_head"]) + n_head_kv = self.find_hparam(["num_kv_heads", "n_head_kv"], optional=True) or 1 + head_dim = self.hparams["hidden_size"] // n_head - head_dim = self.hparams["hidden_size"] // n_head - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head) + q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head) + k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head) + v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head) + data_torch = torch.cat((q, k, v)).reshape_as(data_torch) - for name, data_torch in self.get_tensors(): - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - # QKV tensor transform - # The original query_key_value tensor contains n_head_kv "kv groups", - # each consisting of n_head/n_head_kv query weights followed by one key - # and one value weight (shared by all query heads in the kv group). - # This layout makes it a big pain to work with in GGML. - # So we rearrange them here,, so that we have n_head query weights - # followed by n_head_kv key weights followed by n_head_kv value weights, - # in contiguous fashion. 
- # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py - - if "query_key_value" in name: - qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head) - q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head) - k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head) - v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head) - data_torch = torch.cat((q, k, v)).reshape_as(data_torch) - - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) + return [(self.map_tensor_name(name), data_torch)] @Model.register("GPTBigCodeForCausalLM") @@ -1158,7 +1034,7 @@ class RefactModel(Model): self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"]) self.gguf_writer.add_file_type(self.ftype) - def write_tensors(self): + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: hidden_dim = self.hparams["n_embd"] inner_dim = 4 * hidden_dim hidden_dim = int(2 * inner_dim / 3) @@ -1167,56 +1043,23 @@ class RefactModel(Model): n_head = self.hparams["n_head"] n_head_kv = 1 head_dim = self.hparams["n_embd"] // n_head - block_count = self.hparams["n_layer"] - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + tensors: list[tuple[str, Tensor]] = [] - tensors = dict(self.get_tensors()) - for i in range(block_count): - if (w := tensors.get(f"transformer.h.{i}.attn.kv.weight")) is not None: - tensors[f"model.layers.{i}.self_attn.k_proj.weight"] = w[:n_head_kv * head_dim] - tensors[f"model.layers.{i}.self_attn.v_proj.weight"] = w[n_head_kv * head_dim:] - del tensors[f"transformer.h.{i}.attn.kv.weight"] - if (w := tensors.get(f"transformer.h.{i}.attn.q.weight")) is not None: - tensors[f"model.layers.{i}.self_attn.q_proj.weight"] = w - del tensors[f"transformer.h.{i}.attn.q.weight"] - if (w := tensors.get(f"transformer.h.{i}.mlp.gate_up_proj.weight")) is not None: - tensors[f"model.layers.{i}.mlp.gate_proj.weight"] = w[:ff_dim] - tensors[f"model.layers.{i}.mlp.up_proj.weight"] = w[ff_dim:] - del tensors[f"transformer.h.{i}.mlp.gate_up_proj.weight"] + if bid is not None: + if name == f"transformer.h.{bid}.attn.kv.weight": + tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim])) + tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:])) + elif name == f"transformer.h.{bid}.attn.q.weight": + tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch)) + elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight": + 
tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim])) + tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:])) - for name, data_torch in tensors.items(): - old_dtype = data_torch.dtype + if len(tensors) == 0: + tensors.append((self.map_tensor_name(name), data_torch)) - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight",)) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) + return tensors @Model.register("PersimmonForCausalLM") @@ -1251,22 +1094,11 @@ class PersimmonModel(Model): # self.gguf_writer.add_bos_token_id(71013) # self.gguf_writer.add_eos_token_id(71013) - def write_tensors(self): - block_count = self.hparams.get("num_layers", self.hparams.get("num_hidden_layers")) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: + del name, new_name, bid, n_dims # unused - for name, data_torch in self.get_tensors(): - if name.endswith(".self_attention.rotary_emb.inv_freq"): - continue - old_dtype = data_torch.dtype - # TODO: FP16 conversion produces garbage outputs. (Q8_0 does not, so..?) - data = data_torch.to(torch.float32).squeeze().numpy() - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - n_dims = len(data.shape) - logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - self.gguf_writer.add_tensor(new_name, data) + # TODO: FP16 conversion produces garbage outputs. (Q8_0 does not, so..?) 
+ return True @Model.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM") @@ -1296,84 +1128,67 @@ class StableLMModel(Model): self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True) self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"])) + _q_norms: list[dict[str, Tensor]] | None = None + _k_norms: list[dict[str, Tensor]] | None = None + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + n_head = self.hparams["num_attention_heads"] + n_kv_head = self.hparams["num_key_value_heads"] + + if name.find("q_layernorm.norms") != -1: + assert bid is not None + + if self._q_norms is None: + self._q_norms = [{} for _ in range(self.block_count)] + + self._q_norms[bid][name] = data_torch + + if len(self._q_norms[bid]) >= n_head: + return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm") + else: + return [] + + if name.find("k_layernorm.norms") != -1: + assert bid is not None + + if self._k_norms is None: + self._k_norms = [{} for _ in range(self.block_count)] + + self._k_norms[bid][name] = data_torch + + if len(self._k_norms[bid]) >= n_kv_head: + return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm") + else: + return [] + + return [(self.map_tensor_name(name), data_torch)] + + def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"): + datas: list[Tensor] = [] + # extract the norms in order + for xid in range(n_head): + ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight" + datas.append(norms[ename]) + del norms[ename] + data_torch = torch.stack(datas, dim=0) + + merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight" + new_name = self.map_tensor_name(merged_name) + + return [(new_name, data_torch)] + def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - n_head = self.hparams.get("num_attention_heads") - n_kv_head = self.hparams.get("num_key_value_heads") - q_norms = dict() - k_norms = dict() - for name, data_torch in self.get_tensors(): - # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")): - continue + super().write_tensors() - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - n_dims = len(data.shape) - if name.find("q_layernorm.norms") != -1: - q_norms[name] = data - if len(q_norms) >= (block_count * n_head): - self._stack_qk_norm(block_count, name, tensor_map, n_head, q_norms, n_dims, layer_name="q_layernorm") - continue - if name.find("k_layernorm.norms") != -1: - k_norms[name] = data - if len(k_norms) >= (block_count * n_kv_head): - self._stack_qk_norm(block_count, name, tensor_map, n_kv_head, k_norms, n_dims, layer_name="k_layernorm") - continue - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = 
data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")): - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and not new_name.endswith("_norm.weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.debug(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) - - def _stack_qk_norm(self, block_count, name, tensor_map, n_head, norms, n_dims, layer_name="q_layernorm"): - for bid in range(block_count): - datas = [] - for xid in range(n_head): - ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight" - datas.append(norms[ename]) - del norms[ename] - data = np.stack(datas, axis=0) - data_dtype = data.dtype - merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight" - new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")): - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and not new_name.endswith("_norm.weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.debug(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) + if self._q_norms is not None or self._k_norms is not None: + # flatten two `list[dict[str, Tensor]]` into a single `list[str]` + norms = ( + [k for d in self._q_norms for k in d.keys()] if self._q_norms is not None else [] + ) + ( + [k for d in self._k_norms for k in d.keys()] if self._k_norms is not None else [] + ) + if len(norms) > 0: + raise ValueError(f"Unprocessed norms: {norms}") @Model.register("LlamaForCausalLM", "MistralForCausalLM", "MixtralForCausalLM") @@ -1413,102 +1228,69 @@ class LlamaModel(Model): self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) - # Same as super class, but permuting q_proj, k_proj - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - n_head = self.hparams.get("num_attention_heads") + @staticmethod + def permute(weights: Tensor, n_head: int, n_head_kv: int | None): + if n_head_kv is not None and n_head != n_head_kv: + n_head = n_head_kv + return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) + .swapaxes(1, 2) + .reshape(weights.shape)) + + _experts: list[dict[str, Tensor]] | None = None + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + n_head = self.hparams["num_attention_heads"] n_kv_head = self.hparams.get("num_key_value_heads") - n_experts = self.hparams.get("num_local_experts") - experts = dict() - for name, data_torch in self.get_tensors(): - # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")): - continue - old_dtype 
= data_torch.dtype + if name.endswith("q_proj.weight"): + data_torch = LlamaModel.permute(data_torch, n_head, n_head) + if name.endswith("k_proj.weight"): + data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) + # process the experts separately + if name.find("block_sparse_moe.experts") != -1: + n_experts = self.hparams["num_local_experts"] - data = data_torch.numpy() + assert bid is not None - if name.endswith("q_proj.weight"): - data = permute(data, n_head, n_head) - if name.endswith("k_proj.weight"): - data = permute(data, n_head, n_kv_head) + if self._experts is None: + self._experts = [{} for _ in range(self.block_count)] - data = data.squeeze() + self._experts[bid][name] = data_torch - # process the experts separately - if name.find("block_sparse_moe.experts") != -1: - experts[name] = data - if len(experts) >= n_experts: - # merge the experts into a single 3d tensor - for bid in range(block_count): - for wid in range(1, 4): - full = True - for xid in range(n_experts): - ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.w{wid}.weight" - if ename not in experts: - full = False - break - if not full: - continue + if len(self._experts[bid]) >= n_experts * 3: + tensors: list[tuple[str, Tensor]] = [] - datas = [] - for xid in range(n_experts): - ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.w{wid}.weight" - datas.append(experts[ename]) - del experts[ename] + # merge the experts into a single 3d tensor + for wid in ["w1", "w2", "w3"]: + datas: list[Tensor] = [] - data = np.stack(datas, axis=0) - data_dtype = data.dtype + for xid in range(n_experts): + ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight" + datas.append(self._experts[bid][ename]) + del self._experts[bid][ename] - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) + data_torch = torch.stack(datas, dim=0) - if self.ftype == 1 and data_dtype == np.float32: - data = data.astype(np.float16) + merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight" - merged_name = f"layers.{bid}.feed_forward.experts.w{wid}.weight" + new_name = self.map_tensor_name(merged_name) - new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") + tensors.append((new_name, data_torch)) + return tensors + else: + return [] - logger.info(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}") + return [(self.map_tensor_name(name), data_torch)] - self.gguf_writer.add_tensor(new_name, data) - continue + def write_tensors(self): + super().write_tensors() - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # 1d tensors need to be converted to float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{new_name}, n_dims = 
{n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) - - if len(experts) > 0: - raise ValueError(f"Unprocessed experts: {experts.keys()}") + if self._experts is not None: + # flatten `list[dict[str, Tensor]]` into `list[str]` + experts = [k for d in self._experts for k in d.keys()] + if len(experts) > 0: + raise ValueError(f"Unprocessed experts: {experts}") @Model.register("GrokForCausalLM") @@ -1525,89 +1307,44 @@ class GrokModel(Model): super().set_gguf_parameters() self.gguf_writer.add_name("Grok") - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - n_experts = self.hparams.get("num_local_experts") - experts = dict() - for name, data_torch in self.get_tensors(): - # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")): - continue + _experts: list[dict[str, Tensor]] | None = None - old_dtype = data_torch.dtype + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + # process the experts separately + if name.find(".moe.") != -1: + n_experts = self.hparams["num_local_experts"] - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) + assert bid is not None - data = data_torch.squeeze().numpy() + if self._experts is None: + self._experts = [{} for _ in range(self.block_count)] - # process the experts separately - if name.find(".moe.") != -1: - experts[name] = data - if len(experts) >= n_experts: - # merge the experts into a single 3d tensor - for bid in range(block_count): - for wid in ["linear", "linear_1", "linear_v"]: - full = True - for xid in range(n_experts): - ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight" - if ename not in experts: - full = False - break - if not full: - continue + self._experts[bid][name] = data_torch - datas = [] - for xid in range(n_experts): - ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight" - datas.append(experts[ename]) - del experts[ename] + if len(self._experts[bid]) >= n_experts * 3: + tensors: list[tuple[str, Tensor]] = [] - data = np.stack(datas, axis=0) - data_dtype = data.dtype + # merge the experts into a single 3d tensor + for wid in ["linear", "linear_1", "linear_v"]: + datas: list[Tensor] = [] - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) + for xid in range(n_experts): + ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight" + datas.append(self._experts[bid][ename]) + del self._experts[bid][ename] - if self.ftype == 1 and data_dtype == np.float32: - data = data.astype(np.float16) + data_torch = torch.stack(datas, dim=0) - merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight" + merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight" - new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") + new_name = self.map_tensor_name(merged_name) - logger.info(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}") + tensors.append((new_name, data_torch)) + return tensors + else: + return [] - self.gguf_writer.add_tensor(new_name, data) - continue - - # map tensor names - new_name = tensor_map.get_name(name, 
try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) + return [(self.map_tensor_name(name), data_torch)] @Model.register("DbrxForCausalLM") @@ -1640,68 +1377,45 @@ class DbrxModel(Model): self.gguf_writer.add_file_type(self.ftype) logger.info(f"gguf: file type = {self.ftype}") - def write_tensors(self): - block_count = self.hparams.get("n_layers") - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - for name, data_torch in self.get_tensors(): - n_expert = self.hparams["ffn_config"]["moe_num_experts"] - n_ff = self.hparams["ffn_config"]["ffn_hidden_size"] - n_embd = self.hparams["d_model"] + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose - # original implementation expects (n_expert, n_ff, n_embd) for all experts weights - # But llama.cpp moe graph works differently - # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions - # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor - exp_tensor_names = {"ffn.experts.mlp.w1": None, # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert} - "ffn.experts.mlp.w2": (0, 2, 1), # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff, n_embd, n_expert} - "ffn.experts.mlp.v1": None} # LLM_TENSOR_FFN_UP_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert} - experts = False - for exp_tensor_name in exp_tensor_names.keys(): - if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1: - experts = True - data_torch = data_torch.view(n_expert, n_ff, n_embd) - if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None: - data_torch = data_torch.permute(*permute_tensor) - break + n_expert = self.hparams["ffn_config"]["moe_num_experts"] + n_ff = self.hparams["ffn_config"]["ffn_hidden_size"] + n_embd = self.hparams["d_model"] - old_dtype = data_torch.dtype + # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose + # original implementation expects (n_expert, n_ff, n_embd) for all experts weights + # But llama.cpp moe graph works differently + # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions + # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor + exp_tensor_names = {"ffn.experts.mlp.w1": None, # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert} + "ffn.experts.mlp.w2": (0, 2, 1), # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff, n_embd, n_expert} + "ffn.experts.mlp.v1": None} # LLM_TENSOR_FFN_UP_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert} + experts = False - # convert any unsupported data types to float32 - if data_torch.dtype not 
in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) + for exp_tensor_name in exp_tensor_names.keys(): + if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1: + experts = True + data_torch = data_torch.view(n_expert, n_ff, n_embd) + if (permute_tensor := exp_tensor_names[exp_tensor_name]) is not None: + data_torch = data_torch.permute(*permute_tensor) + break - data = data_torch.squeeze().numpy() + # map tensor names + # In MoE models the ffn tensors are typically most of the model weights, + # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight. + # Every other model has the weight names ending in .weight, + # let's assume that is the convention which is not the case for dbrx: + # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15 + new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",)) - # map tensor names - # In MoE models the ffn tensors are typically most of the model weights, - # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight. - # Every other model has the weight names ending in .weight, - # let's assume that is the convention which is not the case for dbrx: - # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15 - new_name = tensor_map.get_name(name if not experts else name + ".weight", try_suffixes=(".weight",)) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") + return [(new_name, data_torch)] - n_dims = len(data.shape) - data_dtype = data.dtype + def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: + del name, new_name, bid # unused - # Most of the codebase that takes in 1D tensors only handles F32 tensors - # and most of the outputs tensors are F32. 
- if data_dtype != np.float32 and n_dims == 1: - raise ValueError(f"Can not map tensor {name!r}: all 1D tensors must be F32") - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and n_dims > 1: - data = data.astype(np.float16) - - logger.debug(f"{new_name}, n_dims = {n_dims}, shape = {data.shape}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) + return n_dims > 1 @Model.register("MiniCPMForCausalLM") @@ -1734,53 +1448,19 @@ class MiniCPMModel(Model): .reshape(weights.shape) ) - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - n_head = self.hparams.get("num_attention_heads") + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused + + n_head = self.hparams["num_attention_heads"] n_kv_head = self.hparams.get("num_key_value_heads") - for name, data_torch in self.get_tensors(): - # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")): - continue - old_dtype = data_torch.dtype + # HF models permute some of the tensors, so we need to undo that + if name.endswith(("q_proj.weight")): + data_torch = self._reverse_hf_permute(data_torch, n_head, n_head) + if name.endswith(("k_proj.weight")): + data_torch = self._reverse_hf_permute(data_torch, n_head, n_kv_head) - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - # HF models permute some of the tensors, so we need to undo that - if name.endswith(("q_proj.weight")): - data_torch = self._reverse_hf_permute(data_torch, n_head, n_head) - if name.endswith(("k_proj.weight")): - data_torch = self._reverse_hf_permute(data_torch, n_head, n_kv_head) - - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) + return [(self.map_tensor_name(name), data_torch)] @Model.register("QWenLMHeadModel") @@ -1824,46 +1504,6 @@ class QwenModel(Model): self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"]) - def write_tensors(self): - block_count = self.hparams["num_hidden_layers"] - model_kv = dict(self.get_tensors()) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - for name, data_torch in model_kv.items(): - # we don't need these - if name.endswith(".rotary_emb.inv_freq"): - continue - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - self.gguf_writer.add_tensor(new_name, data) - @Model.register("Qwen2ForCausalLM") class Qwen2Model(Model): @@ -1885,92 +1525,52 @@ class Qwen2MoeModel(Model): if (n_experts := self.hparams.get("num_experts")) is not None: self.gguf_writer.add_expert_count(n_experts) + _experts: list[dict[str, Tensor]] | None = None + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + # process the experts separately + if name.find("experts") != -1: + n_experts = self.hparams["num_experts"] + assert bid is not None + + if self._experts is None: + self._experts = [{} for _ in range(self.block_count)] + + self._experts[bid][name] = data_torch + + if len(self._experts[bid]) >= n_experts * 3: + tensors: list[tuple[str, Tensor]] = [] + + # merge the experts into a single 3d tensor + for w_name in ["down_proj", "gate_proj", "up_proj"]: + datas: list[Tensor] = [] + + for xid in range(n_experts): + ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight" + datas.append(self._experts[bid][ename]) + del self._experts[bid][ename] + + data_torch = torch.stack(datas, dim=0) + + merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" + + new_name = self.map_tensor_name(merged_name) + + tensors.append((new_name, data_torch)) + return tensors + else: + return [] + + return [(self.map_tensor_name(name), data_torch)] + def 
write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - n_experts = self.hparams.get("num_experts") - experts = dict() - for name, data_torch in self.get_tensors(): - # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")): - continue + super().write_tensors() - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - # process the experts separately - if name.find("experts") != -1: - experts[name] = data - if len(experts) >= n_experts * 3: - # merge the experts into a single 3d tensor - for bid in range(block_count): - for w_name in ["down_proj", "gate_proj", "up_proj"]: - full = True - for xid in range(n_experts): - ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight" - if ename not in experts: - full = False - break - if not full: - continue - - datas = [] - for xid in range(n_experts): - ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight" - datas.append(experts[ename]) - del experts[ename] - - data = np.stack(datas, axis=0) - data_dtype = data.dtype - - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - if self.ftype == 1 and data_dtype == np.float32: - data = data.astype(np.float16) - - merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - - new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - logger.debug(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) - continue - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")): - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.debug(f"{new_name}, n_dims = {n_dims}, shape = {data.shape}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) - - if len(experts) > 0: - raise ValueError(f"Unprocessed experts: {experts.keys()}") + if self._experts is not None: + # flatten `list[dict[str, Tensor]]` into `list[str]` + experts = [k for d in self._experts for k in d.keys()] + if len(experts) > 0: + raise ValueError(f"Unprocessed experts: {experts}") @Model.register("GPT2LMHeadModel") @@ -1987,54 +1587,27 @@ class GPT2Model(Model): self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) self.gguf_writer.add_file_type(self.ftype) - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - for name, data_torch in self.get_tensors(): - # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq", ".attn.bias", ".attn.masked_bias")): - continue + tensors: list[tuple[str, Tensor]] = [] - if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_proj.weight")): - data_torch = data_torch.transpose(1, 0) + # we don't need these + if name.endswith((".attn.bias", ".attn.masked_bias")): + return tensors - old_dtype = data_torch.dtype + if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_proj.weight")): + data_torch = data_torch.transpose(1, 0) - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) + new_name = self.map_tensor_name(name) - data = data_torch.squeeze().numpy() + tensors.append((new_name, data_torch)) - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") + # note: GPT2 output is tied to (same as) wte in original model + if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD): + tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch)) - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) - - # note: GPT2 output is tied to (same as) wte in original model - if new_name == "token_embd.weight": - logger.info(f"output.weight, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - self.gguf_writer.add_tensor("output.weight", data) + return tensors @Model.register("PhiForCausalLM") @@ -2074,7 +1647,8 @@ class Phi3MiniModel(Model): if not tokenizer_path.is_file(): raise ValueError(f'Error: Missing {tokenizer_path}') - tokenizer = SentencePieceProcessor(str(tokenizer_path)) + tokenizer = SentencePieceProcessor() + tokenizer.LoadFromFile(str(tokenizer_path)) vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) @@ -2084,18 +1658,18 @@ class Phi3MiniModel(Model): for token_id in range(tokenizer.vocab_size()): - piece = tokenizer.id_to_piece(token_id) + piece = tokenizer.IdToPiece(token_id) text = piece.encode("utf-8") - score = tokenizer.get_score(token_id) + score = tokenizer.GetScore(token_id) toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.is_unknown(token_id): + if tokenizer.IsUnknown(token_id): toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.is_control(token_id): + elif tokenizer.IsControl(token_id): toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.is_unused(token_id): + elif tokenizer.IsUnused(token_id): toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.is_byte(token_id): + elif tokenizer.IsByte(token_id): toktype = SentencePieceTokenTypes.BYTE tokens[token_id] = text @@ -2181,51 +1755,18 @@ class PlamoModel(Model): data_torch = torch.reshape(data_torch, (5120, 5120)) return data_torch - def write_tensors(self): - block_count = self.hparams.get("num_layers", self.hparams.get("num_hidden_layers")) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - for name, data_torch in self.get_tensors(): - if "self_attn.rotary_emb.inv_freq" in name: - continue + new_name = self.map_tensor_name(name) - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") + # shuffle for broadcasting of gqa in ggml_mul_mat + if new_name.endswith("attn_q.weight"): + data_torch = self.shuffle_attn_q_weight(data_torch) + elif new_name.endswith("attn_output.weight"): + data_torch = self.shuffle_attn_output_weight(data_torch) - # shuffle for broadcasting of gqa in ggml_mul_mat - if new_name.endswith("attn_q.weight"): - data_torch = self.shuffle_attn_q_weight(data_torch) - elif new_name.endswith("attn_output.weight"): - data_torch = self.shuffle_attn_output_weight(data_torch) - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and 
data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) + return [(new_name, data_torch)] @Model.register("CodeShellForCausalLM") @@ -2248,51 +1789,21 @@ class CodeShellModel(Model): self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) self.gguf_writer.add_rope_scaling_factor(1.0) - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - tensors = dict(self.get_tensors()) - has_lm_head = "lm_head.weight" in tensors.keys() or "output.weight" in tensors.keys() - for name, data_torch in tensors.items(): - # we don't need these - if name.endswith((".attn.rotary_emb.inv_freq")): - continue + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - old_dtype = data_torch.dtype + new_name = self.map_tensor_name(name) - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) + tensors: list[tuple[str, Tensor]] = [(new_name, data_torch)] - data = data_torch.squeeze().numpy() + if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD): + assert self.tensor_names is not None - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") + if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")): + # copy tok_embd.weight to output.weight + tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch)) - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) - - if not has_lm_head and name == "transformer.wte.weight": - self.gguf_writer.add_tensor("output.weight", data) - logger.info(name, f"=> output.weight, shape = {data.shape}, {old_dtype} --> {data.dtype}") + return tensors @Model.register("InternLM2ForCausalLM") @@ -2321,27 +1832,29 @@ class InternLM2Model(Model): sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read()) add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix - tokenizer = SentencePieceProcessor(str(tokenizer_path)) + tokenizer = SentencePieceProcessor() + tokenizer.LoadFromFile(str(tokenizer_path)) + vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) for token_id in range(vocab_size): - piece = tokenizer.id_to_piece(token_id) + piece = tokenizer.IdToPiece(token_id) text = piece.encode("utf-8") - score = tokenizer.get_score(token_id) + score = tokenizer.GetScore(token_id) if text == b"\x00": # (TODO): fixme # Hack here and replace the \x00 characters. - logger.debug(f"InternLM2 convert token '{text}' to '🐉'!") - text = "🐉" + logger.warning(f"InternLM2 convert token '{text}' to '🐉'!") + text = "🐉".encode("utf-8") toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.is_unknown(token_id): + if tokenizer.IsUnknown(token_id): toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.is_control(token_id): + elif tokenizer.IsControl(token_id): toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.is_unused(token_id): + elif tokenizer.IsUnused(token_id): toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.is_byte(token_id): + elif tokenizer.IsByte(token_id): toktype = SentencePieceTokenTypes.BYTE tokens.append(text) @@ -2378,13 +1891,15 @@ in chat mode so that the conversation can end normally.") special_vocab.add_to_gguf(self.gguf_writer) def _try_get_sft_eos(self, tokenizer): - unused_145_list = tokenizer.encode('[UNUSED_TOKEN_145]') - im_end_list = tokenizer.encode('<|im_end|>') + unused_145_list = tokenizer.Encode('[UNUSED_TOKEN_145]') + im_end_list = tokenizer.Encode('<|im_end|>') + eos_token = None assert (len(unused_145_list) == 1) ^ (len(im_end_list) == 1) if len(unused_145_list) == 1: eos_token = unused_145_list[0] if len(im_end_list) == 1: eos_token = im_end_list[0] + assert eos_token return eos_token def _hf_permute_qk(self, weights, n_head: int, n_head_kv: int): @@ -2405,71 +1920,36 @@ in chat mode so that the conversation can end normally.") self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"]) - def post_write_tensors(self, tensor_map, name, data_torch): - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - n_dims = len(data.shape) - data_dtype = data.dtype 
- - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - self.gguf_writer.add_tensor(new_name, data) - - def write_tensors(self): - from einops import rearrange - - num_heads = self.hparams.get("num_attention_heads") - num_kv_heads = self.hparams.get("num_key_value_heads") - hidden_size = self.hparams.get("hidden_size") + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + num_heads = self.hparams["num_attention_heads"] + num_kv_heads = self.hparams["num_key_value_heads"] + hidden_size = self.hparams["hidden_size"] q_per_kv = num_heads // num_kv_heads head_dim = hidden_size // num_heads num_groups = num_heads // q_per_kv - block_count = self.hparams["num_hidden_layers"] - model_kv = dict(self.get_tensors()) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) qkv_pattern = r"model\.layers\.(\d+)\.attention\.wqkv" - for name, data_torch in model_kv.items(): - # we don't need these - if name.endswith(".rotary_emb.inv_freq"): - continue - if re.match(qkv_pattern, name): - bid = re.findall(qkv_pattern, name)[0] - qkv = data_torch - qkv = rearrange(qkv.T, " o (g n i) ->o g n i", g=num_groups, n=q_per_kv + 2, i=head_dim) - q, k, v = qkv[..., : q_per_kv, :], qkv[..., q_per_kv: q_per_kv + 1, :], qkv[..., q_per_kv + 1: q_per_kv + 2, :] - # The model weights of q and k equire additional reshape. - q = self._hf_permute_qk(rearrange(q, " o g n i -> o (g n i)").T, num_heads, num_heads) - k = self._hf_permute_qk(rearrange(k, " o g n i -> o (g n i)").T, num_heads, num_kv_heads) - v = rearrange(v, " o g n i -> o (g n i)").T - self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wq.weight", q) - self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wk.weight", k) - self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wv.weight", v) - else: - self.post_write_tensors(tensor_map, name, data_torch) + if re.match(qkv_pattern, name): + bid = re.findall(qkv_pattern, name)[0] + qkv = data_torch + # qkv = rearrange(qkv.T, " o (g n i) ->o g n i", g=num_groups, n=q_per_kv + 2, i=head_dim) + qkv = qkv.T.reshape((-1, num_groups, q_per_kv + 2, head_dim)) + q, k, v = qkv[..., : q_per_kv, :], qkv[..., q_per_kv: q_per_kv + 1, :], qkv[..., q_per_kv + 1: q_per_kv + 2, :] + # The model weights of q and k equire additional reshape. 
+ # q = self._hf_permute_qk(rearrange(q, " o g n i -> o (g n i)").T, num_heads, num_heads) + q = self._hf_permute_qk(q.reshape((q.shape[0], -1)).T, num_heads, num_heads) + # k = self._hf_permute_qk(rearrange(k, " o g n i -> o (g n i)").T, num_heads, num_kv_heads) + k = self._hf_permute_qk(k.reshape((k.shape[0], -1)).T, num_heads, num_kv_heads) + # v = rearrange(v, " o g n i -> o (g n i)").T + v = v.reshape((v.shape[0], -1)).T + return [ + (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q), + (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k), + (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v), + ] + else: + return [(self.map_tensor_name(name), data_torch)] @Model.register("BertModel", "CamembertModel") @@ -2534,43 +2014,20 @@ class BertModel(Model): special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) special_vocab.add_to_gguf(self.gguf_writer) - def write_tensors(self): - tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count) - tensors = dict(self.get_tensors()) - for name, data_torch in tensors.items(): - # we are only using BERT for embeddings so we don't need the pooling layer - if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"): - continue # we don't need these + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") + # we are only using BERT for embeddings so we don't need the pooling layer + if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"): + return [] # we don't need these - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) + return [(self.map_tensor_name(name), data_torch)] - data = data_torch.squeeze().numpy() - n_dims = len(data.shape) - new_dtype: type[np.floating[Any]] + def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: + del new_name, bid, n_dims # unused - if ( - self.ftype == 1 and name.endswith(".weight") and n_dims == 2 - and name != "embeddings.token_type_embeddings.weight" # not used with get_rows, must be F32 - ): - # if f16 desired, convert any float32 2-dim weight tensors to float16 - new_dtype = np.float16 - else: - # if f32 desired, convert any float16 to float32 - new_dtype = np.float32 - - logger.info(f"{new_name}, n_dims = {n_dims}, {data_torch.dtype} --> {new_dtype}") - - if data.dtype != new_dtype: - data = data.astype(new_dtype) - - self.gguf_writer.add_tensor(new_name, data) + # not used with get_rows, must be F32 + return name == "embeddings.token_type_embeddings.weight" @Model.register("NomicBertModel") @@ -2636,45 +2093,20 @@ class GemmaModel(Model): self.gguf_writer.add_value_length(hparams["head_dim"]) self.gguf_writer.add_file_type(self.ftype) - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - for name, data_torch in self.get_tensors(): - # lm_head is not used in llama.cpp, while autoawq will include this tensor in model - # To prevent errors, skip loading 
lm_head.weight. - if name == "lm_head.weight": - logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.") - continue + # lm_head is not used in llama.cpp, while autoawq will include this tensor in model + # To prevent errors, skip loading lm_head.weight. + if name == "lm_head.weight": + logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.") + return [] - old_dtype = data_torch.dtype + # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89 + if name.endswith("norm.weight"): + data_torch = data_torch + 1 - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89 - if name.endswith("norm.weight"): - data_torch = data_torch + 1 - data = data_torch.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - n_dims = len(data.shape) - data_dtype = data.dtype - - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) + return [(self.map_tensor_name(name), data_torch)] @Model.register("Starcoder2ForCausalLM") @@ -2697,6 +2129,8 @@ class MambaModel(Model): if (self.dir_model / "tokenizer.json").is_file(): self._set_vocab_gpt2() + elif (self.dir_model / "tokenizer.model").is_file(): + self._set_vocab_sentencepiece() else: # Use the GPT-NeoX tokenizer when no tokenizer files are present tokenizer_path = Path(sys.path[0]) / "models" / "ggml-vocab-gpt-neox.gguf" @@ -2704,28 +2138,34 @@ class MambaModel(Model): neox_reader = gguf.GGUFReader(tokenizer_path, "r") field = neox_reader.get_field(gguf.Keys.Tokenizer.MODEL) - self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1])) + self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8") if field else "gpt2") field = neox_reader.get_field(gguf.Keys.Tokenizer.PRE) - self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1])) + self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else "mpt") field = neox_reader.get_field(gguf.Keys.Tokenizer.LIST) + assert field self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size]) field = neox_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE) + assert field self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size]) field = neox_reader.get_field(gguf.Keys.Tokenizer.MERGES) + assert field self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data]) field = neox_reader.get_field(gguf.Keys.Tokenizer.BOS_ID) - self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0]) + self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0] if field else 1) field = neox_reader.get_field(gguf.Keys.Tokenizer.EOS_ID) - self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0]) + self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0] if field else 
0) field = neox_reader.get_field(gguf.Keys.Tokenizer.UNK_ID) - self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0]) + self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0] if field else 0) + + field = neox_reader.get_field(gguf.Keys.Tokenizer.PAD_ID) + self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0] if field else 0) def set_gguf_parameters(self): d_model = self.find_hparam(["hidden_size", "d_model"]) @@ -2754,59 +2194,42 @@ class MambaModel(Model): self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps) self.gguf_writer.add_file_type(self.ftype) - def write_tensors(self): - block_count = self.hparams["n_layer"] - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + _tok_embd = None - tok_embd = None - tok_embd_name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.TOKEN_EMBD] + ".weight" - output_name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.OUTPUT] + ".weight" + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused - for name, data_torch in self.get_tensors(): - old_dtype = data_torch.dtype + output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT) + tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD) - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) + new_name = self.map_tensor_name(name) - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") + if name.endswith(".A_log"): + logger.debug("A_log --> A ==> " + new_name) + data_torch = -torch.exp(data_torch) - if name.endswith(".A_log"): - logger.debug("A_log --> A ==> " + new_name) - data_torch = -torch.exp(data_torch) + # assuming token_embd.weight is seen before output.weight + if self._tok_embd is not None and new_name == output_name: + if torch.equal(self._tok_embd, data_torch): + logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting") + return [] + elif new_name == tok_embd_name: + self._tok_embd = data_torch - # assuming token_embd.weight is seen before output.weight - if tok_embd is not None and new_name == output_name: - if torch.equal(tok_embd, data_torch): - logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting") - continue - if new_name == tok_embd_name: - tok_embd = data_torch + return [(new_name, data_torch)] - data = data_torch.squeeze().numpy() + def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: + del n_dims # unused - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert big float32 2-dim weight tensors to float16 - new_weight_name = new_name[:-len(".weight")] if new_name.endswith(".weight") else "" - if self.ftype == 1 and data_dtype == np.float32 and new_weight_name.endswith((".ssm_in", ".ssm_out", "token_embd", "output")) and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) + return bid is not None and new_name in ( + self.format_tensor_name(n, bid, ".weight" if name.endswith(".weight") else "") for n in [ + gguf.MODEL_TENSOR.SSM_CONV1D, + gguf.MODEL_TENSOR.SSM_X, + gguf.MODEL_TENSOR.SSM_DT, + gguf.MODEL_TENSOR.SSM_A, + gguf.MODEL_TENSOR.SSM_D, + ] + ) @Model.register("CohereForCausalLM") @@ -2840,55 +2263,144 @@ class OlmoModel(Model): # Same as super class, but permuting q_proj, k_proj # Copied from: LlamaModel - def write_tensors(self): - block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) - tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) - n_head = self.hparams.get("num_attention_heads") + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + del bid # unused + + n_head = self.hparams["num_attention_heads"] n_kv_head = self.hparams.get("num_key_value_heads") - for name, data_torch in self.get_tensors(): - old_dtype = data_torch.dtype - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) + if name.endswith("q_proj.weight"): + data_torch = LlamaModel.permute(data_torch, n_head, n_head) + if name.endswith("k_proj.weight"): + data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) - data = data_torch.numpy() - - if name.endswith("q_proj.weight"): - data = permute(data, n_head, n_head) - if name.endswith("k_proj.weight"): - data = permute(data, n_head, n_kv_head) - - data = data.squeeze() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # 1d tensors need to be converted to float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and n_dims == 2: - data = data.astype(np.float16) - - logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") - - self.gguf_writer.add_tensor(new_name, data) + return [(self.map_tensor_name(name), data_torch)] ###### CONVERSION LOGIC ###### +# tree of lazy tensors +class LazyTorchTensor: + _meta: Tensor + _data: Tensor | None + _args: tuple + _func: Callable[[tuple], Tensor] | None + + def __init__(self, *, meta: Tensor, data: Tensor | None = None, args: tuple = (), func: Callable[[tuple], Tensor] | None = None): + self._meta = meta + self._data = data + self._args = args + self._func = func + + @staticmethod + def _recurse_apply(o: Any, fn: Callable[[Any], Any]) -> Any: + # TODO: dict 
and set + if isinstance(o, (list, tuple)): + L = [] + for item in o: + L.append(LazyTorchTensor._recurse_apply(item, fn)) + if isinstance(o, tuple): + L = tuple(L) + return L + elif isinstance(o, LazyTorchTensor): + return fn(o) + else: + return o + + def _wrap_fn(self, fn: Callable, use_self: bool = False) -> Callable[[Any], LazyTorchTensor]: + def wrapped_fn(*args, **kwargs): + if kwargs is None: + kwargs = {} + args = ((self,) if use_self else ()) + args + + meta_args = LazyTorchTensor._recurse_apply(args, lambda t: t._meta) + + return LazyTorchTensor(meta=fn(*meta_args, **kwargs), args=args, func=lambda a: fn(*a, **kwargs)) + return wrapped_fn + + def __getattr__(self, __name: str) -> Any: + meta_attr = getattr(self._meta, __name) + if callable(meta_attr): + return self._wrap_fn(getattr(torch.Tensor, __name), use_self=True) + elif isinstance(meta_attr, torch.Tensor): + # for things like self.T + return self._wrap_fn(lambda s: getattr(s, __name))(self) + else: + return meta_attr + + _dtype_map: dict[torch.dtype, type] = { + torch.float16: np.float16, + torch.float32: np.float32, + } + + def numpy(self) -> gguf.LazyTensor: + dtype = self._dtype_map[self.dtype] + return gguf.LazyTensor(lambda: LazyTorchTensor.to_eager(self).numpy(), dtype=dtype, shape=self.shape) + + @overload + @staticmethod + def to_eager(t: Tensor | LazyTorchTensor) -> Tensor: ... + + @overload + @staticmethod + def to_eager(t: tuple) -> tuple: ... + + @staticmethod + def to_eager(t: Any) -> Any: + def simple_to_eager(_t: LazyTorchTensor) -> Tensor: + # wake up the lazy tensor + if _t._data is None and _t._func is not None: + # recurse into its arguments + _t._args = LazyTorchTensor.to_eager(_t._args) + _t._data = _t._func(_t._args) + if _t._data is not None: + return _t._data + else: + raise ValueError(f"Could not compute lazy tensor {_t!r} with args {_t._args!r}") + + # recurse into lists and/or tuples, keeping their structure + return LazyTorchTensor._recurse_apply(t, simple_to_eager) + + @staticmethod + def from_eager(t: Tensor) -> Tensor: + if (t.__class__ == LazyTorchTensor): + return t + return LazyTorchTensor(meta=t.detach().to("meta"), data=t) # type: ignore + + @classmethod + def __torch_function__(cls, func, types, args=(), kwargs=None): + del types # unused + + if kwargs is None: + kwargs = {} + + if func is torch.Tensor.numpy: + return args[0].numpy() + if func is torch.equal: + eager_args = LazyTorchTensor.to_eager(args) + return func(*eager_args, **kwargs) + + return LazyTorchTensor._wrap_fn(args[0], func)(*args, **kwargs) + + # special methods bypass __getattr__, so they need to be added manually + # ref: https://docs.python.org/3/reference/datamodel.html#special-lookup + # NOTE: LazyTorchTensor can't be a subclass of Tensor (and then be used + # as self._meta is currently used), because then the following + # operations would by default not be wrapped, and so not propagated + # when the tensor is made eager. + # It's better to get non-silent errors for not-yet-supported operators. 
+ # TODO: add more when needed to avoid clutter, or find a more concise way + def __neg__(self, *args): # mamba + return self._wrap_fn(torch.Tensor.__neg__)(self, *args) + + def __add__(self, *args): # gemma + return self._wrap_fn(torch.Tensor.__add__)(self, *args) + + def __getitem__(self, *args): # bloom falcon refact internlm2 + return self._wrap_fn(torch.Tensor.__getitem__)(self, *args) + + def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser( description="Convert a huggingface model to a GGML compatible file") @@ -2898,7 +2410,8 @@ def parse_args() -> argparse.Namespace: ) parser.add_argument( "--awq-path", type=Path, default=None, - help="Path to scale awq cache file") + help="Path to scale awq cache file", + ) parser.add_argument( "--outfile", type=Path, help="path to write to; default: based on input", @@ -2907,14 +2420,30 @@ def parse_args() -> argparse.Namespace: "--outtype", type=str, choices=["f32", "f16"], default="f16", help="output format - use f32 for float32, f16 for float16", ) - parser.add_argument("--bigendian", action="store_true", help="model is executed on big endian machine") + parser.add_argument( + "--bigendian", action="store_true", + help="model is executed on big endian machine", + ) parser.add_argument( "model", type=Path, help="directory containing model file", ) - parser.add_argument("--use-temp-file", action="store_true", help="use the tempfile library while processing (helpful when running out of memory, process killed)") - parser.add_argument("--model-name", type=str, default=None, help="name of the model") - parser.add_argument("--verbose", action="store_true", help="increase output verbosity") + parser.add_argument( + "--use-temp-file", action="store_true", + help="use the tempfile library while processing (helpful when running out of memory, process killed)", + ) + parser.add_argument( + "--no-lazy", action="store_true", + help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)", + ) + parser.add_argument( + "--model-name", type=str, default=None, + help="name of the model", + ) + parser.add_argument( + "--verbose", action="store_true", + help="increase output verbosity", + ) return parser.parse_args() @@ -2960,7 +2489,7 @@ def main() -> None: with torch.inference_mode(): model_class = Model.from_model_architecture(hparams["architectures"][0]) - model_instance = model_class(dir_model, ftype_map[args.outtype], fname_out, args.bigendian, args.use_temp_file) + model_instance = model_class(dir_model, ftype_map[args.outtype], fname_out, args.bigendian, args.use_temp_file, args.no_lazy) logger.info("Set model parameters") model_instance.set_gguf_parameters() diff --git a/convert.py b/convert.py index 7f0b6b749..148bfd66a 100755 --- a/convert.py +++ b/convert.py @@ -284,6 +284,7 @@ class Params: n_experts = None n_experts_used = None f_rope_freq_base = None + n_ff = None # hack to determine LLaMA v1 vs v2 vs CodeLlama if config.get("moe"): @@ -308,6 +309,8 @@ class Params: n_experts_used = config["moe"]["num_experts_per_tok"] f_rope_freq_base = 1e6 + assert n_ff is not None + return Params( n_vocab = model["tok_embeddings.weight"].shape[0], n_embd = config["dim"], @@ -462,7 +465,8 @@ class SentencePieceVocab(Vocab): # not found in alternate location either raise FileNotFoundError('Cannot find tokenizer.model') - self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer)) + self.sentencepiece_tokenizer = SentencePieceProcessor() + 
self.sentencepiece_tokenizer.LoadFromFile(str(fname_tokenizer)) vocab_size = self.sentencepiece_tokenizer.vocab_size() new_tokens = {id: piece for piece, id in added_tokens.items() if id >= vocab_size} @@ -482,23 +486,23 @@ class SentencePieceVocab(Vocab): def sentencepiece_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]: tokenizer = self.sentencepiece_tokenizer for i in range(tokenizer.vocab_size()): - piece = tokenizer.id_to_piece(i) + piece = tokenizer.IdToPiece(i) text = piece.encode("utf-8") - score: float = tokenizer.get_score(i) + score: float = tokenizer.GetScore(i) toktype = gguf.TokenType.NORMAL - if tokenizer.is_unknown(i): + if tokenizer.IsUnknown(i): toktype = gguf.TokenType.UNKNOWN - if tokenizer.is_control(i): + if tokenizer.IsControl(i): toktype = gguf.TokenType.CONTROL # NOTE: I think added_tokens are user defined. # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto # if tokenizer.is_user_defined(i): toktype = gguf.TokenType.USER_DEFINED - if tokenizer.is_unused(i): + if tokenizer.IsUnused(i): toktype = gguf.TokenType.UNUSED - if tokenizer.is_byte(i): + if tokenizer.IsByte(i): toktype = gguf.TokenType.BYTE yield text, score, toktype @@ -906,7 +910,7 @@ class LazyUnpickler(pickle.Unpickler): def rebuild_from_type_v2(func, new_type, args, state): return func(*args) - CLASSES = { + CLASSES: dict[tuple[str, str], type[LazyTensor] | LazyStorageKind] = { # getattr used here as a workaround for mypy not being smart enough to determine # the staticmethods have a __func__ attribute. ('torch._tensor', '_rebuild_from_type_v2'): getattr(rebuild_from_type_v2, '__func__'), @@ -1508,25 +1512,27 @@ def main(args_in: list[str] | None = None) -> None: if args.big_endian: endianess = gguf.GGUFEndian.BIG - params = Params.load(model_plus) - if params.n_ctx == -1: - if args.ctx is None: - msg = """\ - The model doesn't have a context size, and you didn't specify one with --ctx - Please specify one with --ctx: - - LLaMA v1: --ctx 2048 - - LLaMA v2: --ctx 4096""" - parser.error(textwrap.dedent(msg)) - params.n_ctx = args.ctx + params = None + if args.pad_vocab or not args.vocab_only: + params = Params.load(model_plus) + if params.n_ctx == -1: + if args.ctx is None: + msg = """\ + The model doesn't have a context size, and you didn't specify one with --ctx + Please specify one with --ctx: + - LLaMA v1: --ctx 2048 + - LLaMA v2: --ctx 4096""" + parser.error(textwrap.dedent(msg)) + params.n_ctx = args.ctx - if args.outtype: - params.ftype = { - "f32": GGMLFileType.AllF32, - "f16": GGMLFileType.MostlyF16, - "q8_0": GGMLFileType.MostlyQ8_0, - }[args.outtype] + if args.outtype: + params.ftype = { + "f32": GGMLFileType.AllF32, + "f16": GGMLFileType.MostlyF16, + "q8_0": GGMLFileType.MostlyQ8_0, + }[args.outtype] - logger.info(f"params = {params}") + logger.info(f"params = {params}") model_parent_path = model_plus.paths[0].parent vocab_path = Path(args.vocab_dir or args.model or model_parent_path) @@ -1539,6 +1545,17 @@ def main(args_in: list[str] | None = None) -> None: if not args.outfile: raise ValueError("need --outfile if using --vocab-only") outfile = args.outfile + if params is None: + params = Params( + n_vocab = vocab.vocab_size, + n_embd = 1, + n_layer = 1, + n_ctx = 1, + n_ff = 1, + n_head = 1, + n_head_kv = 1, + f_norm_eps = 1e-5, + ) OutputFile.write_vocab_only(outfile, params, vocab, special_vocab, endianess=endianess, pad_vocab=args.pad_vocab) logger.info(f"Wrote {outfile}") diff --git a/examples/main/main.cpp b/examples/main/main.cpp index 
1e4c6caf7..cc9ee8c74 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -363,6 +363,9 @@ int main(int argc, char ** argv) { params.interactive_first = true; params.antiprompt.emplace_back("<|im_start|>user\n"); } + else if (params.conversation) { + params.interactive_first = true; + } // enable interactive mode if interactive start is specified if (params.interactive_first) { @@ -734,7 +737,7 @@ int main(int argc, char ** argv) { // display text if (input_echo && display) { for (auto id : embd) { - const std::string token_str = llama_token_to_piece(ctx, id); + const std::string token_str = llama_token_to_piece(ctx, id, !params.conversation); printf("%s", token_str.c_str()); if (embd.size() > 1) { @@ -817,7 +820,7 @@ int main(int argc, char ** argv) { if (n_past > 0 && is_interacting) { LOG("waiting for user input\n"); - if (params.instruct || params.chatml) { + if (params.conversation || params.instruct || params.chatml) { printf("\n> "); } @@ -827,7 +830,7 @@ int main(int argc, char ** argv) { } std::string buffer; - if (!params.input_prefix.empty()) { + if (!params.input_prefix.empty() && !params.conversation) { LOG("appending input prefix: '%s'\n", params.input_prefix.c_str()); printf("%s", params.input_prefix.c_str()); } @@ -851,7 +854,7 @@ int main(int argc, char ** argv) { // Entering a empty line lets the user pass control back if (buffer.length() > 1) { // append input suffix if any - if (!params.input_suffix.empty()) { + if (!params.input_suffix.empty() && !params.conversation) { LOG("appending input suffix: '%s'\n", params.input_suffix.c_str()); printf("%s", params.input_suffix.c_str()); } diff --git a/examples/server/README.md b/examples/server/README.md index a7c3f0b5f..650317991 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -331,7 +331,7 @@ Notice that each `probs` is an array of length `n_probs`. `content`: Set the text to tokenize. - Note that a special `BOS` token is never inserted. + `add_special`: Boolean indicating if special tokens, i.e. `BOS`, should be inserted. Default: `false` - **POST** `/detokenize`: Convert tokens to text. 
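As a rough usage sketch of the `add_special` option documented above (this is not part of the patch; it assumes a llama.cpp server already listening on the default `http://localhost:8080` and uses the third-party `requests` package purely for illustration):

import requests  # any HTTP client works; `requests` is only an assumption for this sketch

# Tokenize with special tokens (e.g. BOS) inserted; add_special defaults to false.
resp = requests.post(
    "http://localhost:8080/tokenize",
    json={"content": "Hello, world!", "add_special": True},
)
print(resp.json())  # expected to contain the token ids returned by the server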
diff --git a/examples/server/public/favicon.ico b/examples/server/public/favicon.ico new file mode 100644 index 000000000..89e154a0a Binary files /dev/null and b/examples/server/public/favicon.ico differ diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 0c6e22e72..5688c1a7a 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -13,6 +13,8 @@ // increase max payload length to allow use of larger context size #define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576 #include "httplib.h" +// Change JSON_ASSERT from assert() to GGML_ASSERT: +#define JSON_ASSERT GGML_ASSERT #include "json.hpp" // auto generated files (update with ./deps.sh) @@ -860,7 +862,7 @@ struct server_context { slot.sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep); // process "json_schema" and "grammar" - if (data.contains("json_schema") && !data["json_schema"].is_null() && data.contains("grammar") && !data["grammar"].is_null()) { + if (data.contains("json_schema") && !data.at("json_schema").is_null() && data.contains("grammar") && !data.at("grammar").is_null()) { send_error(task, "Either \"json_schema\" or \"grammar\" can be specified, but not both", ERROR_TYPE_INVALID_REQUEST); return false; } else if (data.contains("json_schema") && !data.contains("grammar")) { @@ -1513,7 +1515,7 @@ struct server_context { // add subtasks for (int i = 0; i < prompt_count; i++) { json subtask_data = multiprompt_task.data; - subtask_data["prompt"] = subtask_data["prompt"][i]; + subtask_data["prompt"] = subtask_data.at("prompt")[i]; // subtasks inherit everything else (infill mode, embedding mode, etc.) request_completion(subtask_ids[i], id_multi, subtask_data, multiprompt_task.infill, multiprompt_task.embedding); @@ -1533,7 +1535,7 @@ struct server_context { } if (task.data.contains("system_prompt")) { - system_prompt_set(task.data["system_prompt"]); + system_prompt_set(task.data.at("system_prompt")); for (server_slot & slot : slots) { slot.n_past = 0; @@ -1645,7 +1647,7 @@ struct server_context { } break; case SERVER_TASK_TYPE_SLOT_SAVE: { - int id_slot = task.data["id_slot"]; + int id_slot = task.data.at("id_slot"); server_slot * slot = get_slot(id_slot); if (slot == nullptr) { send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST); @@ -1655,8 +1657,8 @@ struct server_context { const size_t token_count = slot->cache_tokens.size(); const int64_t t_start = ggml_time_us(); - std::string filename = task.data["filename"]; - std::string filepath = task.data["filepath"]; + std::string filename = task.data.at("filename"); + std::string filepath = task.data.at("filepath"); const size_t nwrite = llama_state_seq_save_file(ctx, filepath.c_str(), slot->id + 1, slot->cache_tokens.data(), token_count); @@ -1680,7 +1682,7 @@ struct server_context { } break; case SERVER_TASK_TYPE_SLOT_RESTORE: { - int id_slot = task.data["id_slot"]; + int id_slot = task.data.at("id_slot"); server_slot * slot = get_slot(id_slot); if (slot == nullptr) { send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST); @@ -1689,8 +1691,8 @@ struct server_context { const int64_t t_start = ggml_time_us(); - std::string filename = task.data["filename"]; - std::string filepath = task.data["filepath"]; + std::string filename = task.data.at("filename"); + std::string filepath = task.data.at("filepath"); slot->cache_tokens.resize(slot->n_ctx); size_t token_count = 0; @@ -1722,7 +1724,7 @@ struct server_context { } break; case SERVER_TASK_TYPE_SLOT_ERASE: { - int id_slot = task.data["id_slot"]; + 
int id_slot = task.data.at("id_slot"); server_slot * slot = get_slot(id_slot); if (slot == nullptr) { send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST); @@ -3137,8 +3139,8 @@ int main(int argc, char ** argv) { server_task_result result = ctx_server.queue_results.recv(task.id); ctx_server.queue_results.remove_waiting_task_id(task.id); - const int n_idle_slots = result.data["idle"]; - const int n_processing_slots = result.data["processing"]; + const int n_idle_slots = result.data.at("idle"); + const int n_processing_slots = result.data.at("processing"); json health = { {"status", "ok"}, @@ -3148,7 +3150,7 @@ int main(int argc, char ** argv) { res.status = 200; // HTTP OK if (sparams.slots_endpoint && req.has_param("include_slots")) { - health["slots"] = result.data["slots"]; + health["slots"] = result.data.at("slots"); } if (n_idle_slots == 0) { @@ -3192,7 +3194,7 @@ int main(int argc, char ** argv) { server_task_result result = ctx_server.queue_results.recv(task.id); ctx_server.queue_results.remove_waiting_task_id(task.id); - res.set_content(result.data["slots"].dump(), "application/json"); + res.set_content(result.data.at("slots").dump(), "application/json"); res.status = 200; // HTTP OK }; @@ -3219,32 +3221,32 @@ int main(int argc, char ** argv) { json data = result.data; - const uint64_t n_prompt_tokens_processed = data["n_prompt_tokens_processed"]; - const uint64_t t_prompt_processing = data["t_prompt_processing"]; + const uint64_t n_prompt_tokens_processed = data.at("n_prompt_tokens_processed"); + const uint64_t t_prompt_processing = data.at("t_prompt_processing"); - const uint64_t n_tokens_predicted = data["n_tokens_predicted"]; - const uint64_t t_tokens_generation = data["t_tokens_generation"]; + const uint64_t n_tokens_predicted = data.at("n_tokens_predicted"); + const uint64_t t_tokens_generation = data.at("t_tokens_generation"); - const int32_t kv_cache_used_cells = data["kv_cache_used_cells"]; + const int32_t kv_cache_used_cells = data.at("kv_cache_used_cells"); // metrics definition: https://prometheus.io/docs/practices/naming/#metric-names json all_metrics_def = json { {"counter", {{ {"name", "prompt_tokens_total"}, {"help", "Number of prompt tokens processed."}, - {"value", (uint64_t) data["n_prompt_tokens_processed_total"]} + {"value", (uint64_t) data.at("n_prompt_tokens_processed_total")} }, { {"name", "prompt_seconds_total"}, {"help", "Prompt process time"}, - {"value", (uint64_t) data["t_prompt_processing_total"] / 1.e3} + {"value", (uint64_t) data.at("t_prompt_processing_total") / 1.e3} }, { {"name", "tokens_predicted_total"}, {"help", "Number of generation tokens processed."}, - {"value", (uint64_t) data["n_tokens_predicted_total"]} + {"value", (uint64_t) data.at("n_tokens_predicted_total")} }, { {"name", "tokens_predicted_seconds_total"}, {"help", "Predict process time"}, - {"value", (uint64_t) data["t_tokens_generation_total"] / 1.e3} + {"value", (uint64_t) data.at("t_tokens_generation_total") / 1.e3} }}}, {"gauge", {{ {"name", "prompt_tokens_seconds"}, @@ -3261,15 +3263,15 @@ int main(int argc, char ** argv) { },{ {"name", "kv_cache_tokens"}, {"help", "KV-cache tokens."}, - {"value", (uint64_t) data["kv_cache_tokens_count"]} + {"value", (uint64_t) data.at("kv_cache_tokens_count")} },{ {"name", "requests_processing"}, {"help", "Number of request processing."}, - {"value", (uint64_t) data["processing"]} + {"value", (uint64_t) data.at("processing")} },{ {"name", "requests_deferred"}, {"help", "Number of request deferred."}, - {"value", (uint64_t) 
data["deferred"]} + {"value", (uint64_t) data.at("deferred")} }}} }; @@ -3280,8 +3282,8 @@ int main(int argc, char ** argv) { const auto & metrics_def = el.value(); for (const auto & metric_def : metrics_def) { - const std::string name = metric_def["name"]; - const std::string help = metric_def["help"]; + const std::string name = metric_def.at("name"); + const std::string help = metric_def.at("help"); auto value = json_value(metric_def, "value", 0.); prometheus << "# HELP llamacpp:" << name << " " << help << "\n" @@ -3290,7 +3292,7 @@ int main(int argc, char ** argv) { } } - const int64_t t_start = data["t_start"]; + const int64_t t_start = data.at("t_start"); res.set_header("Process-Start-Time-Unix", std::to_string(t_start)); res.set_content(prometheus.str(), "text/plain; version=0.0.4"); @@ -3299,7 +3301,7 @@ int main(int argc, char ** argv) { const auto handle_slots_save = [&ctx_server, &res_error, &sparams](const httplib::Request & req, httplib::Response & res, int id_slot) { json request_data = json::parse(req.body); - std::string filename = request_data["filename"]; + std::string filename = request_data.at("filename"); if (!validate_file_name(filename)) { res_error(res, format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST)); return; @@ -3329,7 +3331,7 @@ int main(int argc, char ** argv) { const auto handle_slots_restore = [&ctx_server, &res_error, &sparams](const httplib::Request & req, httplib::Response & res, int id_slot) { json request_data = json::parse(req.body); - std::string filename = request_data["filename"]; + std::string filename = request_data.at("filename"); if (!validate_file_name(filename)) { res_error(res, format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST)); return; @@ -3648,7 +3650,8 @@ int main(int argc, char ** argv) { std::vector tokens; if (body.count("content") != 0) { - tokens = ctx_server.tokenize(body["content"], false); + const bool add_special = json_value(body, "add_special", false); + tokens = ctx_server.tokenize(body.at("content"), add_special); } const json data = format_tokenizer_response(tokens); return res.set_content(data.dump(), "application/json; charset=utf-8"); @@ -3660,7 +3663,7 @@ int main(int argc, char ** argv) { std::string content; if (body.count("tokens") != 0) { - const std::vector tokens = body["tokens"]; + const std::vector tokens = body.at("tokens"); content = tokens_to_str(ctx_server.ctx, tokens.cbegin(), tokens.cend()); } @@ -3683,10 +3686,10 @@ int main(int argc, char ** argv) { json prompt; if (body.count("input") != 0) { is_openai = true; - prompt = body["input"]; + prompt = body.at("input"); } else if (body.count("content") != 0) { // with "content", we only support single prompt - prompt = std::vector{body["content"]}; + prompt = std::vector{body.at("content")}; } else { res_error(res, format_error_response("\"input\" or \"content\" must be provided", ERROR_TYPE_INVALID_REQUEST)); return; @@ -3705,7 +3708,7 @@ int main(int argc, char ** argv) { if (!result.error) { if (result.data.count("results")) { // result for multi-task - responses = result.data["results"]; + responses = result.data.at("results"); } else { // result for single task responses = std::vector{result.data}; diff --git a/examples/server/tests/features/server.feature b/examples/server/tests/features/server.feature index 646a4e49d..d21c09135 100644 --- a/examples/server/tests/features/server.feature +++ b/examples/server/tests/features/server.feature @@ -7,6 +7,7 @@ Feature: llama.cpp server And a model file 
tinyllamas/stories260K.gguf from HF repo ggml-org/models And a model file test-model.gguf And a model alias tinyllama-2 + And BOS token is 1 And 42 as server seed # KV Cache corresponds to the total amount of tokens # that can be stored across all independent sequences: #4130 @@ -91,7 +92,18 @@ Feature: llama.cpp server """ What is the capital of France ? """ - Then tokens can be detokenize + Then tokens can be detokenized + And tokens do not begin with BOS + + Scenario: Tokenize w/ BOS + Given adding special tokens + When tokenizing: + """ + What is the capital of Germany? + """ + Then tokens begin with BOS + Given first token is removed + Then tokens can be detokenized Scenario: Models available Given available models diff --git a/examples/server/tests/features/steps/steps.py b/examples/server/tests/features/steps/steps.py index b8dbef21d..f4b1ac1d7 100644 --- a/examples/server/tests/features/steps/steps.py +++ b/examples/server/tests/features/steps/steps.py @@ -376,6 +376,11 @@ def step_seed(context, seed): context.seed.append(seed) +@step('BOS token is {bos:d}') +def step_bos_token(context, bos): + context.bos = bos + + @step('a prefix prompt') def step_prompt_prefix(context): context.prompt_prefix = context_text(context) @@ -656,21 +661,29 @@ async def all_embeddings_are_generated(context): assert_embeddings(context.tasks_result.pop().pop()) +@step('adding special tokens') +def step_tokenize_set_add_special(context): + context.tokenize_add_special = True + + @step('tokenizing') @async_run_until_complete async def step_tokenize(context): context.tokenized_text = context_text(context) async with aiohttp.ClientSession() as session: + tokenize_args = { + "content": context.tokenized_text, + } + if getattr(context, 'tokenize_add_special', None) is not None: + tokenize_args['add_special'] = context.tokenize_add_special async with session.post(f'{context.base_url}/tokenize', - json={ - "content": context.tokenized_text, - }) as response: + json=tokenize_args) as response: assert response.status == 200 tokenize_json = await response.json() context.tokens = tokenize_json['tokens'] -@step('tokens can be detokenize') +@step('tokens can be detokenized') @async_run_until_complete async def step_detokenize(context): assert len(context.tokens) > 0 @@ -685,6 +698,21 @@ async def step_detokenize(context): assert context.tokenized_text == detokenize_json['content'].strip() +@step('tokens begin with BOS') +def step_strings_for_tokenization(context): + assert context.tokens[0] == context.bos + + +@step('tokens do not begin with BOS') +def step_strings_for_tokenization(context): + assert context.tokens[0] != context.bos + + +@step('first token is removed') +def step_strings_for_tokenization(context): + context.tokens = context.tokens[1:] + + @step('an OPTIONS request is sent from {origin}') @async_run_until_complete async def step_options_request(context, origin): @@ -911,7 +939,7 @@ async def oai_chat_completions(user_prompt, while event_received: event_received = False async for line_in_bytes in response.content: - line = line_in_bytes.decode('utf8') + line = line_in_bytes.decode('utf-8') line = line.rstrip('\n').rstrip('\r') if line == '': continue diff --git a/examples/server/themes/README.md b/examples/server/themes/README.md new file mode 100644 index 000000000..62e721a27 --- /dev/null +++ b/examples/server/themes/README.md @@ -0,0 +1,5 @@ +# LLaMA.cpp Server Wild Theme + +Simple themes directory of sample "public" directories. To try any of these add --path to your run like `server --path=wild`. 
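Throughout the server changes above, `data["key"]` reads are swapped for `data.at("key")`, and `JSON_ASSERT` is redirected to `GGML_ASSERT`. The practical difference: `operator[]` on a missing key either silently inserts `null` (mutable json) or trips the library's internal assertion, while `.at()` throws a typed exception that the request handlers can catch and turn into an HTTP error. A minimal sketch of that behaviour, not part of the diff, assuming nlohmann/json is on the include path:

```cpp
#include <cstdio>
#include <string>
#include "json.hpp"

using json = nlohmann::json;

int main() {
    json body = json::parse(R"({"id_slot": 3})");

    int id_slot = body["id_slot"];  // fine: key exists
    (void) id_slot;
    body["filename"];               // missing key: operator[] silently inserts null into the request body

    try {
        std::string filepath = body.at("filepath"); // missing key: at() throws json::out_of_range instead
    } catch (const json::exception & e) {
        // a handler can report this back, e.g. as ERROR_TYPE_INVALID_REQUEST, rather than asserting
        fprintf(stderr, "caught: %s\n", e.what());
    }

    printf("body after operator[]: %s\n", body.dump().c_str()); // note the injected "filename": null
    return 0;
}
```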
+ +![image](wild/wild.png) diff --git a/examples/server/themes/buttons-top/README.md b/examples/server/themes/buttons-top/README.md new file mode 100644 index 000000000..808c4cf81 --- /dev/null +++ b/examples/server/themes/buttons-top/README.md @@ -0,0 +1,7 @@ +# LLaMA.cpp Server Buttons Top Theme + +Simple tweaks to the UI. Chat buttons at the top of the page instead of bottom so you can hit Stop instead of chasing it down the page. + +To use simply run server with `--path=themes/buttons_top` + +![image](buttons_top.png) diff --git a/examples/server/themes/buttons-top/buttons_top.png b/examples/server/themes/buttons-top/buttons_top.png new file mode 100644 index 000000000..c54454519 Binary files /dev/null and b/examples/server/themes/buttons-top/buttons_top.png differ diff --git a/examples/server/themes/buttons-top/favicon.ico b/examples/server/themes/buttons-top/favicon.ico new file mode 100644 index 000000000..89e154a0a Binary files /dev/null and b/examples/server/themes/buttons-top/favicon.ico differ diff --git a/examples/server/themes/buttons-top/index.html b/examples/server/themes/buttons-top/index.html new file mode 100644 index 000000000..6af30d307 --- /dev/null +++ b/examples/server/themes/buttons-top/index.html @@ -0,0 +1,1057 @@ + + + + + + + llama.cpp - chat + + + + + + + +
+ + + + diff --git a/examples/server/themes/wild/README.md b/examples/server/themes/wild/README.md new file mode 100644 index 000000000..560bcc81b --- /dev/null +++ b/examples/server/themes/wild/README.md @@ -0,0 +1,5 @@ +# LLaMA.cpp Server Wild Theme + +Simple tweaks to the UI. To use simply run server with `--path=themes/wild` + +![image](wild.png) diff --git a/examples/server/themes/wild/favicon.ico b/examples/server/themes/wild/favicon.ico new file mode 100644 index 000000000..89e154a0a Binary files /dev/null and b/examples/server/themes/wild/favicon.ico differ diff --git a/examples/server/themes/wild/index.html b/examples/server/themes/wild/index.html new file mode 100644 index 000000000..772e716cd --- /dev/null +++ b/examples/server/themes/wild/index.html @@ -0,0 +1,1061 @@ + + + + + + + llama.cpp - chat + + + + + + + +
+ + + + diff --git a/examples/server/themes/wild/llama_cpp.png b/examples/server/themes/wild/llama_cpp.png new file mode 100644 index 000000000..bad1dc9fc Binary files /dev/null and b/examples/server/themes/wild/llama_cpp.png differ diff --git a/examples/server/themes/wild/llamapattern.png b/examples/server/themes/wild/llamapattern.png new file mode 100644 index 000000000..2a159ce6a Binary files /dev/null and b/examples/server/themes/wild/llamapattern.png differ diff --git a/examples/server/themes/wild/wild.png b/examples/server/themes/wild/wild.png new file mode 100644 index 000000000..46ffa0f3e Binary files /dev/null and b/examples/server/themes/wild/wild.png differ diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 1a2212502..d872b63f5 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -3,6 +3,8 @@ #include "llama.h" #include "common.h" +// Change JSON_ASSERT from assert() to GGML_ASSERT: +#define JSON_ASSERT GGML_ASSERT #include "json.hpp" #include @@ -49,18 +51,18 @@ extern bool server_log_json; #define LOG_WARNING(MSG, ...) server_log("WARN", __func__, __LINE__, MSG, __VA_ARGS__) #define LOG_INFO( MSG, ...) server_log("INFO", __func__, __LINE__, MSG, __VA_ARGS__) -static inline void server_log(const char *level, const char *function, int line, const char *message, const nlohmann::ordered_json &extra); +static inline void server_log(const char * level, const char * function, int line, const char * message, const json & extra); template -static T json_value(const json &body, const std::string &key, const T &default_value) { +static T json_value(const json & body, const std::string & key, const T & default_value) { // Fallback null to default value - if (body.contains(key) && !body.at(key).is_null()){ + if (body.contains(key) && !body.at(key).is_null()) { try { - return body.value(key, default_value); - } - catch (nlohmann::json_abi_v3_11_3::detail::type_error const&){ - std::string message = "Wrong type supplied for parameter '" + key + "'. Expected '" + typeid(default_value).name() + "', using default value."; - server_log("WARN", __func__, __LINE__, message.c_str(), body); + return body.at(key); + } catch (NLOHMANN_JSON_NAMESPACE::detail::type_error const &) { + std::stringstream ss; + ss << "Wrong type supplied for parameter '" << key << "'. 
Expected '" << json(default_value).type_name() << "', using default value."; + LOG_WARNING(ss.str().c_str(), body); return default_value; } } else { @@ -68,16 +70,16 @@ static T json_value(const json &body, const std::string &key, const T &default_v } } -static inline void server_log(const char *level, const char *function, int line, const char *message, const nlohmann::ordered_json &extra) { +static inline void server_log(const char * level, const char * function, int line, const char * message, const json & extra) { std::stringstream ss_tid; ss_tid << std::this_thread::get_id(); - json log = nlohmann::ordered_json{ + json log = json{ {"tid", ss_tid.str()}, {"timestamp", time(nullptr)}, }; if (server_log_json) { - log.merge_patch( { + log.merge_patch({ {"level", level}, {"function", function}, {"line", line}, @@ -98,7 +100,7 @@ static inline void server_log(const char *level, const char *function, int line, } std::stringstream ss; ss << buf << " |"; - for (const auto& el : log.items()) + for (const auto & el : log.items()) { const std::string value = el.value().dump(-1, ' ', false, json::error_handler_t::replace); ss << " " << el.key() << "=" << value; @@ -373,11 +375,11 @@ static json oaicompat_completion_params_parse( llama_params["top_p"] = json_value(body, "top_p", 1.0); // Apply chat template to the list of messages - llama_params["prompt"] = format_chat(model, chat_template, body["messages"]); + llama_params["prompt"] = format_chat(model, chat_template, body.at("messages")); // Handle "stop" field - if (body.contains("stop") && body["stop"].is_string()) { - llama_params["stop"] = json::array({body["stop"].get()}); + if (body.contains("stop") && body.at("stop").is_string()) { + llama_params["stop"] = json::array({body.at("stop").get()}); } else { llama_params["stop"] = json_value(body, "stop", json::array()); } diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 199b74402..0e30f438d 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -1647,7 +1647,7 @@ static void ggml_cuda_op_mul_mat( } } -static void ggml_cuda_mul_mat_vec_p021(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){ +static void ggml_cuda_mul_mat_vec_p021(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1)); GGML_ASSERT(ggml_backend_buffer_is_cuda(src0->buffer)); GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // 0213 permutation @@ -1670,7 +1670,7 @@ static void ggml_cuda_mul_mat_vec_p021(ggml_backend_cuda_context & ctx, const gg ggml_mul_mat_p021_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, ne02, ne12, main_stream); } -static void ggml_cuda_mul_mat_vec_nc(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){ +static void ggml_cuda_mul_mat_vec_nc(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(!ggml_is_transposed(src0)); GGML_ASSERT(!ggml_is_transposed(src1)); GGML_ASSERT(!ggml_is_permuted(src0)); @@ -2416,32 +2416,304 @@ GGML_CALL static void ggml_backend_cuda_synchronize(ggml_backend_t backend) { GGML_UNUSED(backend); } +static void set_ggml_graph_node_properties(ggml_tensor * node, ggml_graph_node_properties * graph_node_properties) { + graph_node_properties->node_address = node->data; + graph_node_properties->node_op = node->op; + for (int i = 0; i < GGML_MAX_DIMS; i++) { + graph_node_properties->ne[i] = 
node->ne[i]; + graph_node_properties->nb[i] = node->nb[i]; + } + for (int i = 0; i < GGML_MAX_SRC; i++) { + graph_node_properties->src_address[i] = node->src[i] ? node->src[i]->data : nullptr; + } +} + +static bool ggml_graph_node_has_matching_properties(ggml_tensor * node, ggml_graph_node_properties * graph_node_properties) { + if (node->data != graph_node_properties->node_address && + node->op != GGML_OP_CPY && + node->op != GGML_OP_VIEW) { + return false; + } + + if (node->op != graph_node_properties->node_op) { + return false; + } + + for (int i = 0; i < GGML_MAX_DIMS; i++) { + if (node->ne[i] != graph_node_properties->ne[i]) { + return false; + } + if (node->nb[i] != graph_node_properties->nb[i]) { + return false; + } + } + + for (int i = 0; i < GGML_MAX_SRC; i++) { + if (node->src[i] && + node->src[i]->data != graph_node_properties->src_address[i] && + node->op != GGML_OP_CPY && + node->op != GGML_OP_VIEW + ) { + return false; + } + } + return true; +} + GGML_CALL static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) { ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; ggml_cuda_set_device(cuda_ctx->device); - for (int i = 0; i < cgraph->n_nodes; i++) { - ggml_tensor * node = cgraph->nodes[i]; +#ifdef USE_CUDA_GRAPH + static const bool disable_cuda_graphs_due_to_env = (getenv("GGML_CUDA_DISABLE_GRAPHS") != nullptr); - if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) { - continue; + // Objects required for CUDA Graph + if (cuda_ctx->cuda_graph == nullptr) { + cuda_ctx->cuda_graph.reset(new ggml_cuda_graph()); + } + + bool use_cuda_graph = true; + bool cuda_graph_update_required = false; + // pointer to CUDA cpy kernel, which is required to identify + // kernel parameters which need updated in the graph for each token + void * ggml_cuda_cpy_fn_ptr = nullptr; + + if (cuda_ctx->cuda_graph->graph == nullptr) { + if (ggml_cuda_info().devices[cuda_ctx->device].cc < CC_AMPERE) { + cuda_ctx->cuda_graph->disable_due_to_gpu_arch = true; +#ifndef NDEBUG + fprintf(stderr, "%s: disabling CUDA graphs due to GPU architecture\n", __func__); +#endif + } + } + + // Disable CUDA graphs in presence of env var, old GPU, use-case which is changing too rapidly, + // or previous graph capture failure. + // Also disable for multi-gpu for now. 
TO DO investigate + if (disable_cuda_graphs_due_to_env + || cuda_ctx->cuda_graph->disable_due_to_gpu_arch + || cuda_ctx->cuda_graph->disable_due_to_too_many_updates + || cuda_ctx->cuda_graph->disable_due_to_failed_graph_capture) { + use_cuda_graph = false; + } + + if (use_cuda_graph) { + if (cuda_ctx->cuda_graph->instance == nullptr) { + cuda_graph_update_required = true; } + // Check if the graph size has changed + if (cuda_ctx->cuda_graph->ggml_graph_properties.size() != (size_t)cgraph->n_nodes) { + cuda_graph_update_required = true; + cuda_ctx->cuda_graph->ggml_graph_properties.resize(cgraph->n_nodes); + } + + // Loop over nodes in GGML graph to determine if CUDA graph update is required + // and store properties to allow this comparison for the next token + for (int i = 0; i < cgraph->n_nodes; i++) { + bool has_matching_properties = true; + if (!cuda_graph_update_required) { + has_matching_properties = ggml_graph_node_has_matching_properties(cgraph->nodes[i], &cuda_ctx->cuda_graph->ggml_graph_properties[i]); + } + if (!has_matching_properties) { + cuda_graph_update_required = true; + } + set_ggml_graph_node_properties(cgraph->nodes[i], &cuda_ctx->cuda_graph->ggml_graph_properties[i]); + } + + // Loop over nodes in GGML graph to obtain info needed for CUDA graph + cuda_ctx->cuda_graph->updated_kernel_arg.clear(); + for (int i = 0; i < cgraph->n_nodes; i++) { + ggml_tensor * node = cgraph->nodes[i]; + + if (node->src[0] && ggml_backend_buffer_is_cuda_split(node->src[0]->buffer)) { + use_cuda_graph = false; // Split buffers are not supported by CUDA graph capture #ifndef NDEBUG - assert(node->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device)); - for (int j = 0; j < GGML_MAX_SRC; j++) { - if (node->src[j] != nullptr) { - assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) || ggml_backend_buffer_is_cuda_split(node->src[j]->buffer)); + fprintf(stderr, "%s: disabling CUDA graphs due to split buffer\n", __func__); +#endif + } + + if (node->op == GGML_OP_MUL_MAT_ID) { + use_cuda_graph = false; // This node type is not supported by CUDA graph capture +#ifndef NDEBUG + fprintf(stderr, "%s: disabling CUDA graphs due to mul_mat_id\n", __func__); +#endif + } + + if (node->op == GGML_OP_ADD && node->src[1] && node->src[1]->ne[1] > 1) { + // disable CUDA graphs for batch size > 1 for now. + // Changes in batch size or context size can cause changes to the grid size of some kernels. + use_cuda_graph = false; +#ifndef NDEBUG + fprintf(stderr, "%s: disabling CUDA graphs due to batch size > 1 [%s] [%ld %ld %ld %ld]\n", __func__, node->name, node->ne[0], node->ne[1], node->ne[2], node->ne[3]); +#endif + } + + if (node->op == GGML_OP_CPY) { + // store the copy op parameter which changes with each token. + cuda_ctx->cuda_graph->updated_kernel_arg.push_back((char **) &(node->src[1]->data)); + if (ggml_cuda_cpy_fn_ptr == nullptr) { + // store a pointer to the copy op CUDA kernel to identify it later + ggml_cuda_cpy_fn_ptr = ggml_cuda_cpy_fn(node->src[0], node->src[1]); + } + } + + if (!use_cuda_graph) { + break; } } + + // Disable CUDA graphs (from the next token) if the use-case is demanding too many consecutive graph updates. 
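As context for the capture logic in this hunk, the underlying CUDA runtime flow is: capture the stream's work into a graph, instantiate it once, then relaunch the executable graph cheaply on later tokens (with `GGML_CUDA_DISABLE_GRAPHS` providing a runtime opt-out, as checked above). A minimal host-only sketch, illustrative and outside the diff, where a `cudaMemsetAsync` stands in for the real per-token kernel launches:

```cpp
#include <cstdio>
#include <cuda_runtime.h>

#define CU_CHECK(call)                                                      \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                     \
                    cudaGetErrorString(err_), __FILE__, __LINE__);          \
            return 1;                                                       \
        }                                                                   \
    } while (0)

int main() {
    const size_t n_bytes = 1024 * sizeof(float);
    void * d_buf = nullptr;
    CU_CHECK(cudaMalloc(&d_buf, n_bytes));

    cudaStream_t stream;
    CU_CHECK(cudaStreamCreate(&stream));

    // 1. capture the work submitted to the stream into a graph
    cudaGraph_t graph = nullptr;
    CU_CHECK(cudaStreamBeginCapture(stream, cudaStreamCaptureModeRelaxed));
    CU_CHECK(cudaMemsetAsync(d_buf, 0, n_bytes, stream)); // stand-in for the captured kernels
    CU_CHECK(cudaStreamEndCapture(stream, &graph));

    // 2. instantiate once ...
    cudaGraphExec_t instance = nullptr;
    CU_CHECK(cudaGraphInstantiate(&instance, graph, nullptr, nullptr, 0));

    // 3. ... then relaunch cheaply on every subsequent step
    for (int step = 0; step < 4; ++step) {
        CU_CHECK(cudaGraphLaunch(instance, stream));
    }
    CU_CHECK(cudaStreamSynchronize(stream));

    CU_CHECK(cudaGraphExecDestroy(instance));
    CU_CHECK(cudaGraphDestroy(graph));
    CU_CHECK(cudaStreamDestroy(stream));
    CU_CHECK(cudaFree(d_buf));
    return 0;
}
```

The hunk above adds the extra machinery this sketch omits: deciding per token whether the captured graph can be reused as-is, patched via `cudaGraphExecUpdate`/kernel-parameter edits, or must be re-captured.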
+ if (cuda_graph_update_required) { + cuda_ctx->cuda_graph->number_consecutive_updates++; + } else { + cuda_ctx->cuda_graph->number_consecutive_updates = 0; + } + + if (cuda_ctx->cuda_graph->number_consecutive_updates >= 4) { + cuda_ctx->cuda_graph->disable_due_to_too_many_updates = true; +#ifndef NDEBUG + fprintf(stderr, "%s: disabling CUDA graphs due to too many consecutive updates\n", __func__); +#endif + } + } + + if (use_cuda_graph && cuda_graph_update_required) { // Start CUDA graph capture + CUDA_CHECK(cudaStreamBeginCapture(cuda_ctx->stream(), cudaStreamCaptureModeRelaxed)); + } + +#else + bool use_cuda_graph = false; + bool cuda_graph_update_required = false; +#endif // USE_CUDA_GRAPH + + bool graph_evaluated_or_captured = false; + + while (!graph_evaluated_or_captured) { + // Only perform the graph execution if CUDA graphs are not enabled, or we are capturing the graph. + // With the use of CUDA graphs, the execution will be performed by the graph launch. + if (!use_cuda_graph || cuda_graph_update_required) { + for (int i = 0; i < cgraph->n_nodes; i++) { + ggml_tensor * node = cgraph->nodes[i]; + + if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) { + continue; + } + +#ifndef NDEBUG + assert(node->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device)); + for (int j = 0; j < GGML_MAX_SRC; j++) { + if (node->src[j] != nullptr) { + assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) || ggml_backend_buffer_is_cuda_split(node->src[j]->buffer)); + } + } #endif - bool ok = ggml_cuda_compute_forward(*cuda_ctx, node); - if (!ok) { - fprintf(stderr, "%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op)); + bool ok = ggml_cuda_compute_forward(*cuda_ctx, node); + if (!ok) { + fprintf(stderr, "%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op)); + } + GGML_ASSERT(ok); + } } - GGML_ASSERT(ok); + +#ifdef USE_CUDA_GRAPH + if (use_cuda_graph && cuda_graph_update_required) { // End CUDA graph capture + if (cuda_ctx->cuda_graph->graph != nullptr) { + CUDA_CHECK(cudaGraphDestroy(cuda_ctx->cuda_graph->graph)); + cuda_ctx->cuda_graph->graph = nullptr; + } + CUDA_CHECK(cudaStreamEndCapture(cuda_ctx->stream(), &cuda_ctx->cuda_graph->graph)); + +#if 0 + if (disable_cuda_graphs_due_to_failed_capture) { + use_cuda_graph = false; + cuda_ctx->cuda_graph->disable_due_to_failed_graph_capture = true; +#ifndef NDEBUG + fprintf(stderr, "%s: disabling CUDA graphs due to failed graph capture\n", __func__); +#endif + } else { + graph_evaluated_or_captured = true; // CUDA graph has been captured + } +#endif + graph_evaluated_or_captured = true; // CUDA graph has been captured + } else { + graph_evaluated_or_captured = true; // ggml graph has been directly evaluated + } + } + + if (use_cuda_graph) { + if (cuda_ctx->cuda_graph->instance == nullptr) { // Create executable graph from captured graph. 
+ CUDA_CHECK(cudaGraphInstantiate(&cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, NULL, NULL, 0)); + } + + // Perform update to graph (if required for this token), and change copy parameter (required for every token) + + if (cuda_graph_update_required) { + // Extract nodes from graph + if (cuda_ctx->cuda_graph->num_nodes == 0) { + // First call with null argument gets number of nodes in graph + CUDA_CHECK(cudaGraphGetNodes(cuda_ctx->cuda_graph->graph, nullptr, &cuda_ctx->cuda_graph->num_nodes)); + } + // Subsequent call with non-null argument gets nodes + cuda_ctx->cuda_graph->nodes.resize(cuda_ctx->cuda_graph->num_nodes); + cuda_ctx->cuda_graph->params.resize(cuda_ctx->cuda_graph->num_nodes); + if (cuda_ctx->cuda_graph->num_nodes > 0) { + CUDA_CHECK(cudaGraphGetNodes(cuda_ctx->cuda_graph->graph, cuda_ctx->cuda_graph->nodes.data(), &cuda_ctx->cuda_graph->num_nodes)); + + // Loop over nodes, and extract kernel parameters from each node + for (size_t i = 0; i < cuda_ctx->cuda_graph->num_nodes; i++) { + cudaGraphNodeType node_type; + CUDA_CHECK(cudaGraphNodeGetType(cuda_ctx->cuda_graph->nodes[i], &node_type)); + if (node_type == cudaGraphNodeTypeKernel) { + cudaError_t stat = cudaGraphKernelNodeGetParams(cuda_ctx->cuda_graph->nodes[i], &cuda_ctx->cuda_graph->params[i]); // Get params using runtime + if (stat == cudaErrorInvalidDeviceFunction) { + // Fails due to incorrect handling by CUDA runtime of CUDA BLAS node. + // We don't need to update blas nodes, so clear error and move on. + cudaGetLastError(); + } else { + GGML_ASSERT(stat == cudaSuccess); + } + } + } + } + } + + // One of the arguments to the copy kernel is updated for each token, hence we need to + // replace that argument with the updated value in the CUDA graph + if (!cuda_graph_update_required) { // on update steps, the live parameters will already be captured + int k = 0; + for (size_t i = 0; i < cuda_ctx->cuda_graph->num_nodes; i++) { + if (cuda_ctx->cuda_graph->params[i].func == ggml_cuda_cpy_fn_ptr) { + char ** updated_kernel_arg_ptr = cuda_ctx->cuda_graph->updated_kernel_arg.at(k++); + cuda_ctx->cuda_graph->params[i].kernelParams[1] = updated_kernel_arg_ptr; + CUDA_CHECK(cudaGraphKernelNodeSetParams(cuda_ctx->cuda_graph->nodes[i], &cuda_ctx->cuda_graph->params[i])); + } + } + } + + // Update graph executable + cudaGraphExecUpdateResultInfo result_info; + cudaError_t stat = cudaGraphExecUpdate(cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, &result_info); + if (stat == cudaErrorGraphExecUpdateFailure) { +#ifndef NDEBUG + fprintf(stderr, "%s: CUDA graph update failed\n", __func__); +#endif + // The pre-existing graph exec cannot be updated due to violated constraints + // so instead clear error and re-instantiate + cudaGetLastError(); + CUDA_CHECK(cudaGraphExecDestroy(cuda_ctx->cuda_graph->instance)); + cuda_ctx->cuda_graph->instance = nullptr; + CUDA_CHECK(cudaGraphInstantiate(&cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, NULL, NULL, 0)); + } else { + GGML_ASSERT(stat == cudaSuccess); + } + // Launch graph + CUDA_CHECK(cudaGraphLaunch(cuda_ctx->cuda_graph->instance, cuda_ctx->stream())); +#else + graph_evaluated_or_captured = true; +#endif // USE_CUDA_GRAPH } return GGML_STATUS_SUCCESS; diff --git a/ggml-cuda/clamp.cu b/ggml-cuda/clamp.cu index 379ded042..8009a3e3d 100644 --- a/ggml-cuda/clamp.cu +++ b/ggml-cuda/clamp.cu @@ -31,5 +31,4 @@ void ggml_cuda_op_clamp(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); 
clamp_f32_cuda(src0_d, dst_d, min, max, ggml_nelements(src0), stream); - CUDA_CHECK(cudaGetLastError()); } diff --git a/ggml-cuda/common.cuh b/ggml-cuda/common.cuh index 582bc1be7..91db18ba3 100644 --- a/ggml-cuda/common.cuh +++ b/ggml-cuda/common.cuh @@ -19,6 +19,7 @@ #include #include #include +#include #if defined(GGML_USE_HIPBLAS) #include @@ -526,6 +527,43 @@ struct ggml_tensor_extra_gpu { cudaEvent_t events[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS]; // events for synchronizing multiple GPUs }; + +#if (CUDART_VERSION >= 12000) && defined(GGML_CUDA_USE_GRAPHS) +#define USE_CUDA_GRAPH +#endif + +struct ggml_graph_node_properties { + void * node_address; + ggml_op node_op; + int64_t ne[GGML_MAX_DIMS]; + size_t nb[GGML_MAX_DIMS]; + void * src_address[GGML_MAX_SRC]; +}; + +struct ggml_cuda_graph { +#ifdef USE_CUDA_GRAPH + ~ggml_cuda_graph() { + if (instance != nullptr) { + CUDA_CHECK(cudaGraphExecDestroy(instance)); + } + if (graph != nullptr) { + CUDA_CHECK(cudaGraphDestroy(graph)); + } + } + cudaGraph_t graph = nullptr; + cudaGraphExec_t instance = nullptr; + size_t num_nodes = 0; + std::vector nodes; + std::vector params; + bool disable_due_to_gpu_arch = false; + bool disable_due_to_too_many_updates = false; + bool disable_due_to_failed_graph_capture = false; + int number_consecutive_updates = 0; + std::vector ggml_graph_properties; + std::vector updated_kernel_arg; +#endif +}; + struct ggml_backend_cuda_context { int device; std::string name; @@ -534,6 +572,8 @@ struct ggml_backend_cuda_context { cudaStream_t streams[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS] = { { nullptr } }; cublasHandle_t cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr}; + std::unique_ptr cuda_graph; + explicit ggml_backend_cuda_context(int device) : device(device), name(GGML_CUDA_NAME + std::to_string(device)) { diff --git a/ggml-cuda/convert.cu b/ggml-cuda/convert.cu index 75e50c985..830e2d756 100644 --- a/ggml-cuda/convert.cu +++ b/ggml-cuda/convert.cu @@ -727,7 +727,6 @@ static void convert_unary_cuda(const void * __restrict__ vx, dst_t * __restrict_ } to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) { - int id; switch (type) { case GGML_TYPE_Q4_0: return dequantize_row_q4_0_cuda; @@ -738,8 +737,7 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) { case GGML_TYPE_Q5_1: return dequantize_block_cuda; case GGML_TYPE_Q8_0: - CUDA_CHECK(cudaGetDevice(&id)); - if (ggml_cuda_info().devices[id].cc >= CC_PASCAL) { + if (ggml_cuda_info().devices[ggml_cuda_get_device()].cc >= CC_PASCAL) { return dequantize_block_q8_0_f16_cuda; } return dequantize_block_cuda; diff --git a/ggml-cuda/cpy.cu b/ggml-cuda/cpy.cu index 16d9c8fff..12d741f01 100644 --- a/ggml-cuda/cpy.cu +++ b/ggml-cuda/cpy.cu @@ -459,3 +459,32 @@ void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; ggml_cuda_cpy(ctx, src0, dst); } + +void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) { + if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) { + return (void*) cpy_f32_f16; + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) { + return (void*) cpy_f32_f16; + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q8_0) { + return (void*) cpy_f32_q; + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_0) { + return (void*) cpy_f32_q; + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_1) { + return (void*) cpy_f32_q; + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_0) { + 
return (void*) cpy_f32_q; + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_IQ4_NL) { + return (void*) cpy_f32_q; + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_1) { + return (void*) cpy_f32_q; + } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) { + return (void*) cpy_f32_f16; + } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) { + return (void*) cpy_f32_f16; + } else { + fprintf(stderr, "%s: unsupported type combination (%s to %s)\n", __func__, + ggml_type_name(src0->type), ggml_type_name(src1->type)); + GGML_ASSERT(false); + } +} + diff --git a/ggml-cuda/cpy.cuh b/ggml-cuda/cpy.cuh index f0b2c453b..796167426 100644 --- a/ggml-cuda/cpy.cuh +++ b/ggml-cuda/cpy.cuh @@ -5,3 +5,5 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1); void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1); diff --git a/ggml-cuda/mmq.cu b/ggml-cuda/mmq.cu index 60d6616a8..7948f1b12 100644 --- a/ggml-cuda/mmq.cu +++ b/ggml-cuda/mmq.cu @@ -1735,8 +1735,7 @@ static void ggml_mul_mat_q4_0_q8_1_cuda( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { - int id; - CUDA_CHECK(cudaGetDevice(&id)); + int id = ggml_cuda_get_device(); const int compute_capability = ggml_cuda_info().devices[id].cc; int mmq_x, mmq_y, nwarps; @@ -1780,8 +1779,7 @@ static void ggml_mul_mat_q4_1_q8_1_cuda( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { - int id; - CUDA_CHECK(cudaGetDevice(&id)); + int id = ggml_cuda_get_device(); const int compute_capability = ggml_cuda_info().devices[id].cc; int mmq_x, mmq_y, nwarps; @@ -1825,8 +1823,7 @@ static void ggml_mul_mat_q5_0_q8_1_cuda( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { - int id; - CUDA_CHECK(cudaGetDevice(&id)); + int id = ggml_cuda_get_device(); const int compute_capability = ggml_cuda_info().devices[id].cc; int mmq_x, mmq_y, nwarps; @@ -1870,8 +1867,7 @@ static void ggml_mul_mat_q5_1_q8_1_cuda( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { - int id; - CUDA_CHECK(cudaGetDevice(&id)); + int id = ggml_cuda_get_device(); const int compute_capability = ggml_cuda_info().devices[id].cc; int mmq_x, mmq_y, nwarps; @@ -1915,8 +1911,7 @@ static void ggml_mul_mat_q8_0_q8_1_cuda( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { - int id; - CUDA_CHECK(cudaGetDevice(&id)); + int id = ggml_cuda_get_device(); const int compute_capability = ggml_cuda_info().devices[id].cc; int mmq_x, mmq_y, nwarps; @@ -1960,8 +1955,7 @@ static void ggml_mul_mat_q2_K_q8_1_cuda( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { - int id; - CUDA_CHECK(cudaGetDevice(&id)); + int id = ggml_cuda_get_device(); const int compute_capability = ggml_cuda_info().devices[id].cc; int mmq_x, mmq_y, nwarps; @@ -2007,8 
+2001,7 @@ static void ggml_mul_mat_q3_K_q8_1_cuda( #if QK_K == 256 - int id; - CUDA_CHECK(cudaGetDevice(&id)); + int id = ggml_cuda_get_device(); const int compute_capability = ggml_cuda_info().devices[id].cc; int mmq_x, mmq_y, nwarps; @@ -2053,8 +2046,7 @@ static void ggml_mul_mat_q4_K_q8_1_cuda( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { - int id; - CUDA_CHECK(cudaGetDevice(&id)); + int id = ggml_cuda_get_device(); const int compute_capability = ggml_cuda_info().devices[id].cc; int mmq_x, mmq_y, nwarps; @@ -2098,8 +2090,7 @@ static void ggml_mul_mat_q5_K_q8_1_cuda( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { - int id; - CUDA_CHECK(cudaGetDevice(&id)); + int id = ggml_cuda_get_device(); const int compute_capability = ggml_cuda_info().devices[id].cc; int mmq_x, mmq_y, nwarps; @@ -2143,8 +2134,7 @@ static void ggml_mul_mat_q6_K_q8_1_cuda( const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) { - int id; - CUDA_CHECK(cudaGetDevice(&id)); + int id = ggml_cuda_get_device(); const int compute_capability = ggml_cuda_info().devices[id].cc; int mmq_x, mmq_y, nwarps; diff --git a/ggml-cuda/mmvq.cu b/ggml-cuda/mmvq.cu index 396559001..65cc1bcaa 100644 --- a/ggml-cuda/mmvq.cu +++ b/ggml-cuda/mmvq.cu @@ -89,8 +89,7 @@ static void mul_mat_vec_q_cuda( GGML_ASSERT(ncols_x % qk == 0); GGML_ASSERT(ncols_y <= MMVQ_MAX_BATCH_SIZE); - int id; - CUDA_CHECK(cudaGetDevice(&id)); + int id = ggml_cuda_get_device(); int64_t nwarps = 1; int64_t rows_per_cuda_block = 1; @@ -328,8 +327,7 @@ void ggml_cuda_op_mul_mat_vec_q( const int64_t ne0 = dst->ne[0]; - int id; - CUDA_CHECK(cudaGetDevice(&id)); + int id = ggml_cuda_get_device(); // the main device has a larger memory buffer to hold the results from all GPUs // nrows_dst == nrows of the matrix that the kernel writes into diff --git a/ggml-cuda/scale.cu b/ggml-cuda/scale.cu index 6e3617d1c..1405e066e 100644 --- a/ggml-cuda/scale.cu +++ b/ggml-cuda/scale.cu @@ -28,5 +28,4 @@ void ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { memcpy(&scale, dst->op_params, sizeof(float)); scale_f32_cuda(src0_d, dst_d, scale, ggml_nelements(src0), stream); - CUDA_CHECK(cudaGetLastError()); } diff --git a/ggml-metal.m b/ggml-metal.m index c5ee7f38b..c85ff88b0 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -265,11 +265,20 @@ static void ggml_metal_log(enum ggml_log_level level, const char * format, ...){ static void * ggml_metal_host_malloc(size_t n) { void * data = NULL; + +#if TARGET_OS_OSX + kern_return_t err = vm_allocate((vm_map_t) mach_task_self(), (void *) &data, n, VM_FLAGS_ANYWHERE); + if (err != KERN_SUCCESS) { + GGML_METAL_LOG_ERROR("%s: error: vm_allocate failed\n", __func__); + return NULL; + } +#else const int result = posix_memalign((void **) &data, sysconf(_SC_PAGESIZE), n); if (result != 0) { GGML_METAL_LOG_ERROR("%s: error: posix_memalign failed\n", __func__); return NULL; } +#endif return data; } @@ -2840,7 +2849,11 @@ GGML_CALL static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_ ggml_backend_metal_free_device(); if (ctx->owned) { +#if TARGET_OS_OSX + vm_deallocate((vm_map_t)mach_task_self(), (vm_address_t)ctx->all_data, ctx->all_size); +#else free(ctx->all_data); +#endif 
} free(ctx); @@ -2944,14 +2957,16 @@ GGML_CALL static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buff ctx->owned = true; ctx->n_buffers = 1; - ctx->buffers[0].data = ctx->all_data; - ctx->buffers[0].size = size; - ctx->buffers[0].metal = [device newBufferWithBytesNoCopy:ctx->all_data - length:size_aligned - options:MTLResourceStorageModeShared - deallocator:nil]; + if (ctx->all_data != NULL) { + ctx->buffers[0].data = ctx->all_data; + ctx->buffers[0].size = size; + ctx->buffers[0].metal = [device newBufferWithBytesNoCopy:ctx->all_data + length:size_aligned + options:MTLResourceStorageModeShared + deallocator:nil]; + } - if (ctx->buffers[0].metal == nil) { + if (ctx->all_data == NULL || ctx->buffers[0].metal == nil) { GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0); free(ctx); ggml_backend_metal_free_device(); diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 6e968fc4e..5951c0bb0 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -860,7 +860,7 @@ class GGUFValueType(IntEnum): # Note: Does not support GGML_QKK_64 QK_K = 256 # Items here are (block size, type size) -GGML_QUANT_SIZES = { +GGML_QUANT_SIZES: dict[GGMLQuantizationType, tuple[int, int]] = { GGMLQuantizationType.F32: (1, 4), GGMLQuantizationType.F16: (1, 2), GGMLQuantizationType.Q4_0: (32, 2 + 16), diff --git a/gguf-py/gguf/gguf_reader.py b/gguf-py/gguf/gguf_reader.py index db8525d85..21b089f8a 100644 --- a/gguf-py/gguf/gguf_reader.py +++ b/gguf-py/gguf/gguf_reader.py @@ -65,7 +65,7 @@ class ReaderTensor(NamedTuple): class GGUFReader: # I - same as host, S - swapped - byte_order: Literal['I' | 'S'] = 'I' + byte_order: Literal['I'] | Literal['S'] = 'I' alignment: int = GGUF_DEFAULT_ALIGNMENT # Note: Internal helper, API may change. 
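The Metal host-buffer changes above switch macOS allocations from `posix_memalign` to `vm_allocate`, with the matching `vm_deallocate` on free; `vm_allocate` hands back zero-filled, page-aligned VM pages. A reduced sketch of that allocation pattern, not part of the diff (the real code gates on `TARGET_OS_OSX` rather than `__APPLE__` and logs failures):

```cpp
#include <cstdlib>
#include <unistd.h>
#ifdef __APPLE__
#include <mach/mach.h>
#endif

// Page-aligned host allocation, mirroring the ggml_metal_host_malloc change above (sketch only).
static void * host_alloc(size_t n) {
#ifdef __APPLE__
    vm_address_t addr = 0;
    if (vm_allocate((vm_map_t) mach_task_self(), &addr, n, VM_FLAGS_ANYWHERE) != KERN_SUCCESS) {
        return nullptr;
    }
    return (void *) addr;  // zero-filled, page-aligned VM pages
#else
    void * data = nullptr;
    if (posix_memalign(&data, sysconf(_SC_PAGESIZE), n) != 0) {
        return nullptr;
    }
    return data;
#endif
}

static void host_free(void * data, size_t n) {
#ifdef __APPLE__
    vm_deallocate((vm_map_t) mach_task_self(), (vm_address_t) data, n);  // size must match the allocation
#else
    (void) n;
    free(data);
#endif
}
```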
@@ -83,7 +83,7 @@ class GGUFReader: GGUFValueType.BOOL: np.bool_, } - def __init__(self, path: os.PathLike[str] | str, mode: Literal['r' | 'r+' | 'c'] = 'r'): + def __init__(self, path: os.PathLike[str] | str, mode: Literal['r'] | Literal['r+'] | Literal['c'] = 'r'): self.data = np.memmap(path, mode = mode) offs = 0 if self._get(offs, np.uint32, override_order = '<')[0] != GGUF_MAGIC: @@ -128,7 +128,7 @@ class GGUFReader: return self.tensors[idx] def _get( - self, offset: int, dtype: npt.DTypeLike, count: int = 1, override_order: None | Literal['I' | 'S' | '<'] = None, + self, offset: int, dtype: npt.DTypeLike, count: int = 1, override_order: None | Literal['I'] | Literal['S'] | Literal['<'] = None, ) -> npt.NDArray[Any]: count = int(count) itemsize = int(np.empty([], dtype = dtype).itemsize) @@ -250,7 +250,7 @@ class GGUFReader: raise ValueError(f'Found duplicated tensor with name {tensor_name}') tensor_names.add(tensor_name) ggml_type = GGMLQuantizationType(raw_dtype[0]) - n_elems = np.prod(dims) + n_elems = int(np.prod(dims)) block_size, type_size = GGML_QUANT_SIZES[ggml_type] n_bytes = n_elems * type_size // block_size data_offs = int(start_offs + offset_tensor[0]) diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index d9cfbf711..8dcf9330b 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -7,7 +7,7 @@ import struct import tempfile from enum import Enum, auto from io import BufferedWriter -from typing import IO, Any, Sequence, Mapping +from typing import IO, Any, Callable, Sequence, Mapping from string import ascii_letters, digits import numpy as np @@ -28,6 +28,47 @@ from .constants import ( logger = logging.getLogger(__name__) +class LazyTensor: + data: Callable[[], np.ndarray[Any, Any]] + # to avoid too deep recursion + functions: list[Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]]] + dtype: np.dtype[Any] + shape: tuple[int, ...] 
+ + def __init__(self, data: Callable[[], np.ndarray[Any, Any]], *, dtype: type, shape: tuple[int, ...]): + self.data = data + self.functions = [] + self.dtype = np.dtype(dtype) + self.shape = shape + + def astype(self, dtype: type, **kwargs) -> LazyTensor: + self.functions.append(lambda n: n.astype(dtype, **kwargs)) + self.dtype = np.dtype(dtype) + return self + + @property + def nbytes(self) -> int: + size = 1 + for n in self.shape: + size *= n + return size * self.dtype.itemsize + + def tofile(self, *args, **kwargs) -> None: + data = self.data() + for f in self.functions: + data = f(data) + assert data.shape == self.shape + assert data.dtype == self.dtype + assert data.nbytes == self.nbytes + self.functions = [] + self.data = lambda: data + data.tofile(*args, **kwargs) + + def byteswap(self, *args, **kwargs) -> LazyTensor: + self.functions.append(lambda n: n.byteswap(*args, **kwargs)) + return self + + class WriterState(Enum): EMPTY = auto() HEADER = auto() @@ -38,7 +79,7 @@ class WriterState(Enum): class GGUFWriter: fout: BufferedWriter temp_file: tempfile.SpooledTemporaryFile[bytes] | None - tensors: list[np.ndarray[Any, Any]] + tensors: list[np.ndarray[Any, Any] | LazyTensor] _simple_value_packing = { GGUFValueType.UINT8: "B", GGUFValueType.INT8: "b", @@ -176,7 +217,7 @@ class GGUFWriter: if pack_fmt is not None: self.kv_data += self._pack(pack_fmt, val, skip_pack_prefix = vtype == GGUFValueType.BOOL) elif vtype == GGUFValueType.STRING: - encoded_val = val.encode("utf8") if isinstance(val, str) else val + encoded_val = val.encode("utf-8") if isinstance(val, str) else val self.kv_data += self._pack("Q", len(encoded_val)) self.kv_data += encoded_val elif vtype == GGUFValueType.ARRAY and isinstance(val, Sequence) and val: @@ -205,7 +246,7 @@ class GGUFWriter: raise ValueError(f'Duplicated tensor name {name}') self.ti_names.add(name) - encoded_name = name.encode("utf8") + encoded_name = name.encode("utf-8") self.ti_data += self._pack("Q", len(encoded_name)) self.ti_data += encoded_name n_dims = len(tensor_shape) @@ -237,7 +278,7 @@ class GGUFWriter: self.ti_data_count += 1 def add_tensor( - self, name: str, tensor: np.ndarray[Any, Any], raw_shape: Sequence[int] | None = None, + self, name: str, tensor: np.ndarray[Any, Any] | LazyTensor, raw_shape: Sequence[int] | None = None, raw_dtype: GGMLQuantizationType | None = None, ) -> None: if self.endianess == GGUFEndian.BIG: @@ -262,7 +303,7 @@ class GGUFWriter: if pad != 0: fp.write(bytes([0] * pad)) - def write_tensor_data(self, tensor: np.ndarray[Any, Any]) -> None: + def write_tensor_data(self, tensor: np.ndarray[Any, Any] | LazyTensor) -> None: if self.state is not WriterState.TI_DATA: raise ValueError(f'Expected output file to contain tensor info, got {self.state}') @@ -272,15 +313,33 @@ class GGUFWriter: tensor.tofile(self.fout) self.write_padding(self.fout, tensor.nbytes) - def write_tensors_to_file(self) -> None: + def write_tensors_to_file(self, *, progress: bool = False) -> None: self.write_ti_data_to_file() self.write_padding(self.fout, self.fout.tell()) if self.temp_file is None: + self.tensors.reverse() # to pop from the "beginning" in constant time + + if progress: + from tqdm import tqdm + + total_bytes = sum(t.nbytes for t in self.tensors) + + bar = tqdm(desc="Writing", total=total_bytes, unit="byte", unit_scale=True) + + while True: + try: + tensor = self.tensors.pop() + except IndexError: + break + tensor.tofile(self.fout) + bar.update(tensor.nbytes) + self.write_padding(self.fout, tensor.nbytes) + return while True: try: - 
tensor = self.tensors.pop(0) + tensor = self.tensors.pop() except IndexError: break tensor.tofile(self.fout) @@ -479,7 +538,7 @@ class GGUFWriter: self.add_bool(Keys.Tokenizer.ADD_PREFIX, value) def add_chat_template(self, value: str | Sequence[Mapping[str, str]]) -> None: - if isinstance(value, list): + if not isinstance(value, str): template_default = None template_names = set() diff --git a/gguf-py/gguf/vocab.py b/gguf-py/gguf/vocab.py index c97a78f39..3ba99be4f 100644 --- a/gguf-py/gguf/vocab.py +++ b/gguf-py/gguf/vocab.py @@ -4,7 +4,7 @@ import logging import json import os from pathlib import Path -from typing import Any, Callable +from typing import Any, Callable, Sequence, Mapping, Iterable from .gguf_writer import GGUFWriter @@ -15,11 +15,11 @@ class SpecialVocab: merges: list[str] add_special_token: dict[str, bool] special_token_ids: dict[str, int] - chat_template: str | None + chat_template: str | Sequence[Mapping[str, str]] | None def __init__( self, path: str | os.PathLike[str], load_merges: bool = False, - special_token_types: tuple[str, ...] | None = None, + special_token_types: Iterable[str] | None = None, n_vocab: int | None = None, ): self.special_token_ids = {} diff --git a/gguf-py/pyproject.toml b/gguf-py/pyproject.toml index d1d876d6d..36e63ee3b 100644 --- a/gguf-py/pyproject.toml +++ b/gguf-py/pyproject.toml @@ -21,6 +21,7 @@ classifiers = [ [tool.poetry.dependencies] python = ">=3.8" numpy = ">=1.17" +tqdm = ">=4.27" [tool.poetry.dev-dependencies] pytest = "^5.2" diff --git a/gguf-py/scripts/gguf-dump.py b/gguf-py/scripts/gguf-dump.py index 2d3c3943f..1a37a7b91 100755 --- a/gguf-py/scripts/gguf-dump.py +++ b/gguf-py/scripts/gguf-dump.py @@ -47,7 +47,7 @@ def dump_metadata(reader: GGUFReader, args: argparse.Namespace) -> None: if len(field.types) == 1: curr_type = field.types[0] if curr_type == GGUFValueType.STRING: - log_message += ' = {0}'.format(repr(str(bytes(field.parts[-1]), encoding='utf8')[:60])) + log_message += ' = {0}'.format(repr(str(bytes(field.parts[-1]), encoding='utf-8')[:60])) elif field.types[0] in reader.gguf_scalar_to_np: log_message += ' = {0}'.format(field.parts[-1][0]) print(log_message) # noqa: NP100 diff --git a/gguf-py/scripts/gguf-new-metadata.py b/gguf-py/scripts/gguf-new-metadata.py index 3444ab418..c8e3a83df 100644 --- a/gguf-py/scripts/gguf-new-metadata.py +++ b/gguf-py/scripts/gguf-new-metadata.py @@ -7,7 +7,7 @@ import json from pathlib import Path import numpy as np -from typing import Any, Mapping, Sequence +from typing import Any, Sequence # Necessary to load the local gguf package if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists(): @@ -34,7 +34,7 @@ def get_byteorder(reader: gguf.GGUFReader) -> gguf.GGUFEndian: return host_endian -def decode_field(field: gguf.ReaderField) -> Any: +def decode_field(field: gguf.ReaderField | None) -> Any: if field and field.types: main_type = field.types[0] @@ -42,11 +42,11 @@ def decode_field(field: gguf.ReaderField) -> Any: sub_type = field.types[-1] if sub_type == gguf.GGUFValueType.STRING: - return [str(bytes(field.parts[idx]), encoding='utf8') for idx in field.data] + return [str(bytes(field.parts[idx]), encoding='utf-8') for idx in field.data] else: return [pv for idx in field.data for pv in field.parts[idx].tolist()] if main_type == gguf.GGUFValueType.STRING: - return str(bytes(field.parts[-1]), encoding='utf8') + return str(bytes(field.parts[-1]), encoding='utf-8') else: return field.parts[-1][0] @@ -59,7 +59,7 @@ def get_field_data(reader: 
gguf.GGUFReader, key: str) -> Any: return decode_field(field) -def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new_metadata: Mapping[str, str], remove_metadata: Sequence[str]) -> None: +def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new_metadata: dict[str, str], remove_metadata: Sequence[str]) -> None: for field in reader.fields.values(): # Suppress virtual fields and fields written by GGUFWriter if field.name == gguf.Keys.General.ARCHITECTURE or field.name.startswith('GGUF.'): @@ -101,7 +101,7 @@ def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new for tensor in reader.tensors: # Dimensions are written in reverse order, so flip them first - shape = np.flipud(tensor.shape) + shape = np.flipud(tensor.shape).tolist() writer.add_tensor_info(tensor.name, shape, tensor.data.dtype, tensor.data.nbytes, tensor.tensor_type) writer.write_header_to_file() diff --git a/llama.cpp b/llama.cpp index 5f07a9369..309abfe48 100644 --- a/llama.cpp +++ b/llama.cpp @@ -4445,9 +4445,15 @@ static void llm_load_vocab( } else if ( tokenizer_pre == "command-r") { vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R; + } else if ( + tokenizer_pre == "qwen2") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2; } else if ( tokenizer_pre == "olmo") { vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO; + } else if ( + tokenizer_pre == "dbrx") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX; } else { throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str())); } @@ -12491,6 +12497,7 @@ struct llm_tokenizer_bpe { case LLAMA_VOCAB_TYPE_BPE: switch (vocab.type_pre) { case LLAMA_VOCAB_PRE_TYPE_LLAMA3: + case LLAMA_VOCAB_PRE_TYPE_DBRX: word_collection = unicode_regex_split(text, { // original regex from tokenizer.json //"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+", @@ -12550,6 +12557,13 @@ struct llm_tokenizer_bpe { "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)", }); break; + case LLAMA_VOCAB_PRE_TYPE_QWEN2: + word_collection = unicode_regex_split(text, { + // original regex from tokenizer.json + // "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+" + "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+", + }); + break; default: // default regex for BPE tokenization pre-processing word_collection = unicode_regex_split(text, { diff --git a/llama.h b/llama.h index aed3a2108..a9f5cbb26 100644 --- a/llama.h +++ b/llama.h @@ -81,7 +81,9 @@ extern "C" { LLAMA_VOCAB_PRE_TYPE_GPT2 = 7, LLAMA_VOCAB_PRE_TYPE_REFACT = 8, LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9, - LLAMA_VOCAB_PRE_TYPE_OLMO = 10, + LLAMA_VOCAB_PRE_TYPE_QWEN2 = 10, + LLAMA_VOCAB_PRE_TYPE_OLMO = 11, + LLAMA_VOCAB_PRE_TYPE_DBRX = 12, }; // note: these values should be synchronized with ggml_rope diff --git a/models/ggml-vocab-qwen2.gguf b/models/ggml-vocab-qwen2.gguf new file mode 100644 index 000000000..541e475bc Binary files /dev/null and b/models/ggml-vocab-qwen2.gguf differ diff --git a/models/ggml-vocab-qwen2.gguf.inp b/models/ggml-vocab-qwen2.gguf.inp new file mode 100644 index 000000000..0a89107c6 --- /dev/null +++ b/models/ggml-vocab-qwen2.gguf.inp @@ -0,0 +1,106 @@ +ied 4 ½ months +__ggml_vocab_test__ +Führer +__ggml_vocab_test__ + +__ggml_vocab_test__ + 
+__ggml_vocab_test__ + +__ggml_vocab_test__ + +__ggml_vocab_test__ + +__ggml_vocab_test__ + + +__ggml_vocab_test__ + + + +__ggml_vocab_test__ + + + + +__ggml_vocab_test__ + + +__ggml_vocab_test__ +Hello world +__ggml_vocab_test__ + Hello world +__ggml_vocab_test__ +Hello World +__ggml_vocab_test__ + Hello World +__ggml_vocab_test__ + Hello World! +__ggml_vocab_test__ +Hello, world! +__ggml_vocab_test__ + Hello, world! +__ggml_vocab_test__ + this is 🦙.cpp +__ggml_vocab_test__ +w048 7tuijk dsdfhu +__ggml_vocab_test__ +нещо на Български +__ggml_vocab_test__ +កាន់តែពិសេសអាចខលចេញ +__ggml_vocab_test__ +🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token) +__ggml_vocab_test__ +Hello +__ggml_vocab_test__ + Hello +__ggml_vocab_test__ + Hello +__ggml_vocab_test__ + Hello +__ggml_vocab_test__ + Hello +__ggml_vocab_test__ + Hello + Hello +__ggml_vocab_test__ + ( +__ggml_vocab_test__ + + = +__ggml_vocab_test__ +' era +__ggml_vocab_test__ +Hello, y'all! How are you 😁 ?我想在apple工作1314151天~ +__ggml_vocab_test__ +3 +__ggml_vocab_test__ +33 +__ggml_vocab_test__ +333 +__ggml_vocab_test__ +3333 +__ggml_vocab_test__ +33333 +__ggml_vocab_test__ +333333 +__ggml_vocab_test__ +3333333 +__ggml_vocab_test__ +33333333 +__ggml_vocab_test__ +333333333 +__ggml_vocab_test__ + + + + + + + + + + + +🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? We'Ve a'lL +__ggml_vocab_test__ diff --git a/models/ggml-vocab-qwen2.gguf.out b/models/ggml-vocab-qwen2.gguf.out new file mode 100644 index 000000000..401a510e8 --- /dev/null +++ b/models/ggml-vocab-qwen2.gguf.out @@ -0,0 +1,43 @@ + 1122 220 19 220 26062 3951 + 37 50753 261 + + 220 + 256 + 262 + 197 + 198 + 271 + 1406 + 1572 + 9707 1879 + 21927 1879 + 9707 4337 + 21927 4337 + 21927 4337 0 + 9707 11 1879 0 + 21927 11 1879 0 + 419 374 11162 99 247 13 10821 + 86 15 19 23 220 22 83 1963 41808 11472 2940 16739 + 78762 14144 1456 13073 63471 33594 3038 133178 79012 + 146394 97529 241 44258 233 146568 44258 224 147603 20879 115 146280 44258 223 146280 147272 97529 227 147805 148301 147270 44258 223 146848 + 145836 320 8252 8 26525 114 378 235 149921 30543 320 35673 99066 97534 8 25521 227 320 3243 42365 429 702 1181 1828 3950 8 + 9707 + 21927 + 220 21927 + 256 21927 + 262 21927 + 262 21927 198 262 21927 + 320 + 198 284 + 6 11385 + 9707 11 379 64848 0 2585 525 498 26525 223 937 104100 18493 22377 99257 16 18 16 19 16 20 16 35727 21216 + 18 + 18 18 + 18 18 18 + 18 18 18 18 + 18 18 18 18 18 + 18 18 18 18 18 18 + 18 18 18 18 18 18 18 + 18 18 18 18 18 18 18 18 + 18 18 18 18 18 18 18 18 18 + 198 4710 14731 65497 7847 1572 2303 78672 10947 145836 320 8252 8 26525 114 378 235 149921 30543 320 35673 99066 97534 8 25521 227 11162 99 247 149955 220 18 220 18 18 220 18 18 18 220 18 18 18 18 220 18 18 18 18 18 220 18 18 18 18 18 18 220 18 18 18 18 18 18 18 220 18 18 18 18 18 18 18 18 220 18 13 18 220 18 496 18 220 18 1112 18 220 146394 97529 241 44258 233 146568 44258 224 147603 20879 115 146280 44258 223 146280 147272 97529 227 144534 937 104100 18493 22377 99257 16 18 16 19 16 20 16 35727 21216 55460 53237 18658 14144 1456 13073 63471 33594 3038 133178 79012 3355 4605 4605 13874 13874 73594 3014 3014 28149 17085 2928 26610 7646 358 3003 1012 364 83 813 566 594 1052 11 364 787 498 2704 30 364 44 537 2704 358 3278 
diff --git a/pyrightconfig.json b/pyrightconfig.json
new file mode 100644
index 000000000..020a71a4e
--- /dev/null
+++ b/pyrightconfig.json
@@ -0,0 +1,3 @@
+{
+    "extraPaths": ["gguf-py"],
+}
diff --git a/sgemm.cpp b/sgemm.cpp
index 4e0159804..40ba9d7e9 100644
--- a/sgemm.cpp
+++ b/sgemm.cpp
@@ -1,6 +1,3 @@
-// -*- mode:c++;indent-tabs-mode:nil;c-basic-offset:4;coding:utf-8 -*-
-// vi: set et ft=c++ ts=4 sts=4 sw=4 fenc=utf-8 :vi
-//
 // Copyright 2024 Mozilla Foundation
 //
 // Permission is hereby granted, free of charge, to any person obtaining
@@ -585,15 +582,15 @@ class tinyBLAS_Q0_ARM {
 };
 #endif // __ARM_FEATURE_DOTPROD
 
-#if defined(__AVX2__) || defined(__AVX512F__)
+#if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__)
 template <typename TA, typename TB, typename TC>
-class tinyBLAS_Q0_AVX2 {
+class tinyBLAS_Q0_AVX {
   public:
-    tinyBLAS_Q0_AVX2(int64_t k,
-                     const TA *A, int64_t lda,
-                     const TB *B, int64_t ldb,
-                     TC *C, int64_t ldc,
-                     int ith, int nth)
+    tinyBLAS_Q0_AVX(int64_t k,
+                    const TA *A, int64_t lda,
+                    const TB *B, int64_t ldb,
+                    TC *C, int64_t ldc,
+                    int ith, int nth)
         : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
     }
 
@@ -728,14 +725,34 @@ class tinyBLAS_Q0_AVX2 {
         __m256 Cv[RN][RM] = {};
         for (int64_t l = 0; l < k; ++l)
             for (int64_t j = 0; j < RN; ++j)
-                for (int64_t i = 0; i < RM; ++i)
+                for (int64_t i = 0; i < RM; ++i) {
+#if defined(__AVX2__)
+                    __m256 udTmp = updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l),
+                                                          load(A + lda * (ii + i) + l)),
+                                         _mm256_sign_epi8(load(B + ldb * (jj + j) + l),
+                                                          load(A + lda * (ii + i) + l)));
+#else
+                    __m128i ali0 = load0(A + lda * (ii + i) + l);
+                    __m128i ali1 = load1(A + lda * (ii + i) + l);
+                    __m128i blj0 = load0(B + ldb * (jj + j) + l);
+                    __m128i blj1 = load1(B + ldb * (jj + j) + l);
+
+                    __m128i sepAA0 = _mm_sign_epi8(ali0, ali0);
+                    __m128i sepAA1 = _mm_sign_epi8(ali1, ali1);
+                    __m128i sepBA0 = _mm_sign_epi8(blj0, ali0);
+                    __m128i sepBA1 = _mm_sign_epi8(blj1, ali1);
+
+                    // updot
+                    const __m128i oneFill = _mm_set1_epi16(1);
+                    __m128i mad0 = _mm_maddubs_epi16(sepAA0, sepBA0);
+                    __m128i mad1 = _mm_maddubs_epi16(sepAA1, sepBA1);
+                    __m256 udTmp = _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_madd_epi16(oneFill, mad1), _mm_madd_epi16(oneFill, mad0)));
+#endif
                     Cv[j][i] = madd(_mm256_set1_ps(unhalf(A[lda * (ii + i) + l].d) * unhalf(B[ldb * (jj + j) + l].d)),
-                                    updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l),
-                                                           load(A + lda * (ii + i) + l)),
-                                          _mm256_sign_epi8(load(B + ldb * (jj + j) + l),
-                                                           load(A + lda * (ii + i) + l))),
-                                    Cv[j][i]);
+                                    udTmp,
+                                    Cv[j][i]);
+                }
         for (int64_t j = 0; j < RN; ++j)
             for (int64_t i = 0; i < RM; ++i)
                 C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
 
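
Side note (not part of the patch): the #else branch added above relies on the identity dot(a, b) = dot(|a|, sign(a) * b), which turns a signed-by-signed byte dot product into the unsigned-by-signed form that _mm_maddubs_epi16 accepts; multiplying the resulting 16-bit pairs by 1 with _mm_madd_epi16 then widens them to 32-bit sums before the two 128-bit halves are packed into a __m256 of floats. A standalone sketch of that identity on a single 16-byte half (the function names are illustrative, not from sgemm.cpp; build with SSSE3 enabled, e.g. -mssse3):

```cpp
#include <immintrin.h>
#include <cstdint>
#include <cstdio>

// reference: plain signed dot product over 16 bytes
static int32_t dot16_ref(const int8_t *a, const int8_t *b) {
    int32_t s = 0;
    for (int i = 0; i < 16; ++i) s += (int32_t)a[i] * (int32_t)b[i];
    return s;
}

// same result via the |a| * (sign(a) * b) trick used in the patch
static int32_t dot16_sse(const int8_t *a, const int8_t *b) {
    __m128i va   = _mm_loadu_si128((const __m128i *)a);
    __m128i vb   = _mm_loadu_si128((const __m128i *)b);
    __m128i absa = _mm_sign_epi8(va, va);                   // |a|, consumed as unsigned bytes
    __m128i sgnb = _mm_sign_epi8(vb, va);                   // b with a's sign applied
    __m128i mad  = _mm_maddubs_epi16(absa, sgnb);           // 8 x i16 pairwise products
    __m128i sum  = _mm_madd_epi16(mad, _mm_set1_epi16(1));  // widen to 4 x i32
    sum = _mm_add_epi32(sum, _mm_srli_si128(sum, 8));       // horizontal add of the 4 lanes
    sum = _mm_add_epi32(sum, _mm_srli_si128(sum, 4));
    return _mm_cvtsi128_si32(sum);
}

int main() {
    int8_t a[16], b[16];
    for (int i = 0; i < 16; ++i) { a[i] = (int8_t)(i * 7 - 50); b[i] = (int8_t)(90 - i * 11); }
    printf("ref=%d sse=%d\n", dot16_ref(a, b), dot16_sse(a, b));
}
```

The patch applies the same steps to both 16-byte halves of each 32-byte quant block, so the existing __m256 accumulator and the madd/hsum path are reused unchanged on AVX-only hardware.
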
@@ -746,10 +763,28 @@ class tinyBLAS_Q0_AVX2 {
         return _mm256_loadu_si256((const __m256i *)b->qs);
     }
 
+    inline __m128i load0(const block_q8_0 *b) {
+        return _mm_loadu_si128((const __m128i *)b->qs);
+    }
+
+    inline __m128i load1(const block_q8_0 *b) {
+        return _mm_loadu_si128(((const __m128i *)b->qs) + 1);
+    }
+
     inline __m256i load(const block_q4_0 *b) {
         return _mm256_sub_epi8(denibble(b->qs), _mm256_set1_epi8(8));
     }
 
+    inline __m128i load0(const block_q4_0 *b) {
+        const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs));
+        return _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), x), _mm_set1_epi8(8));
+    }
+
+    inline __m128i load1(const block_q4_0 *b) {
+        const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs));
+        return _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(x, 4)), _mm_set1_epi8(8));
+    }
+
     inline __m256 updot(__m256i u, __m256i s) {
         __m256i res;
 #if defined(__AVXVNNI__) || (defined(__AVX512VNNI__) && defined(__AVX512VL__))
@@ -777,7 +812,7 @@ class tinyBLAS_Q0_AVX2 {
     const int ith;
     const int nth;
 };
-#endif // __AVX2__
+#endif // __AVX__
 
 } // namespace
 
@@ -928,8 +963,8 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
     case GGML_TYPE_Q8_0: {
         if (Btype != GGML_TYPE_Q8_0)
             return false;
-#if defined(__AVX2__) || defined(__AVX512F__)
-        tinyBLAS_Q0_AVX2<block_q8_0, block_q8_0, float> tb{
+#if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__)
+        tinyBLAS_Q0_AVX<block_q8_0, block_q8_0, float> tb{
             k, (const block_q8_0 *)A, lda,
             (const block_q8_0 *)B, ldb,
             (float *)C, ldc,
@@ -952,8 +987,8 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
     case GGML_TYPE_Q4_0: {
         if (Btype != GGML_TYPE_Q8_0)
            return false;
-#if defined(__AVX2__) || defined(__AVX512F__)
-        tinyBLAS_Q0_AVX2<block_q4_0, block_q8_0, float> tb{
+#if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__)
+        tinyBLAS_Q0_AVX<block_q4_0, block_q8_0, float> tb{
            k, (const block_q4_0 *)A, lda,
            (const block_q8_0 *)B, ldb,
            (float *)C, ldc,