diff --git a/.github/workflows/nix-ci-aarch64.yml b/.github/workflows/nix-ci-aarch64.yml
index be7c26d40..0c6cf5f09 100644
--- a/.github/workflows/nix-ci-aarch64.yml
+++ b/.github/workflows/nix-ci-aarch64.yml
@@ -2,13 +2,20 @@ name: Nix aarch64 builds
 on:
   workflow_dispatch: # allows manual triggering
+  schedule:
+    # Rebuild daily rather than on every push because QEMU is expensive
+    # (with a cold cache a build takes ~1.5 h instead of minutes).
+    #
+    # randint(0, 59), randint(0, 23)
+    - cron: '26 12 * * *'
+  # But also rebuild if we touched any of the Nix expressions:
   push:
     branches:
       - master
-    paths: ['.github/workflows/**', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.swift', '**/*.m', '**/*.sh', '**/*.py', '**/*.nix']
+    paths: ['**/*.nix', 'flake.lock']
   pull_request:
     types: [opened, synchronize, reopened]
-    paths: ['**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.swift', '**/*.m', '**/*.sh', '**/*.py', '**/*.nix']
+    paths: ['**/*.nix', 'flake.lock']
 
 jobs:
   nix-build-aarch64:
diff --git a/common/common.cpp b/common/common.cpp
index d04c37d7f..fc42de4c1 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -204,6 +204,25 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
             params.prompt_cache_all = true;
         } else if (arg == "--prompt-cache-ro") {
             params.prompt_cache_ro = true;
+        } else if (arg == "-bf" || arg == "--binary-file") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            std::ifstream file(argv[i], std::ios::binary);
+            if (!file) {
+                fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
+                invalid_param = true;
+                break;
+            }
+            // store the external file name in params
+            params.prompt_file = argv[i];
+            file.seekg(0, std::ios::end);
+            size_t size = file.tellg();
+            file.seekg(0, std::ios::beg);
+            params.prompt.resize(size);
+            file.read((char *)params.prompt.data(), size);
+            fprintf(stderr, "Read %zu bytes from binary file %s\n", size, argv[i]);
         } else if (arg == "-f" || arg == "--file") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -654,6 +673,12 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
             if (params.logdir.back() != DIRECTORY_SEPARATOR) {
                 params.logdir += DIRECTORY_SEPARATOR;
             }
+        } else if (arg == "--save-all-logits" || arg == "--kl-divergence-base") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.logits_file = argv[i];
         } else if (arg == "--perplexity" || arg == "--all-logits") {
             params.logits_all = true;
         } else if (arg == "--ppl-stride") {
@@ -690,6 +715,16 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             params.winogrande_tasks = std::stoi(argv[i]);
+        } else if (arg == "--multiple-choice") {
+            params.multiple_choice = true;
+        } else if (arg == "--multiple-choice-tasks") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.multiple_choice_tasks = std::stoi(argv[i]);
+        } else if (arg == "--kl-divergence") {
+            params.kl_divergence = true;
         } else if (arg == "--ignore-eos") {
             params.ignore_eos = true;
         } else if (arg == "--no-penalize-nl") {
@@ -889,6 +924,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     printf("  --in-suffix STRING    string to suffix after user inputs with (default: empty)\n");
     printf("  -f FNAME, --file FNAME\n");
     printf("                        prompt file to start generation.\n");
+    printf("  -bf FNAME, --binary-file FNAME\n");
+    printf("                        binary file containing multiple choice tasks.\n");
     printf("  -n N, --n-predict N   number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)\n", params.n_predict);
     printf("  -c N, --ctx-size N    size of the prompt context (default: %d, 0 = loaded from model)\n", params.n_ctx);
     printf("  -b N, --batch-size N  batch size for prompt processing (default: %d)\n", params.n_batch);
@@ -937,6 +974,9 @@
     printf("  --hellaswag-tasks N   number of tasks to use when computing the HellaSwag score (default: %zu)\n", params.hellaswag_tasks);
     printf("  --winogrande          compute Winogrande score over random tasks from datafile supplied with -f\n");
     printf("  --winogrande-tasks N  number of tasks to use when computing the Winogrande score (default: %zu)\n", params.winogrande_tasks);
+    printf("  --multiple-choice     compute multiple choice score over random tasks from datafile supplied with -bf\n");
+    printf("  --multiple-choice-tasks N  number of tasks to use when computing the multiple choice score (default: %zu)\n", params.multiple_choice_tasks);
+    printf("  --kl-divergence       computes KL-divergence to logits provided via --kl-divergence-base\n");
     printf("  --keep N              number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
     printf("  --draft N             number of tokens to draft for speculative decoding (default: %d)\n", params.n_draft);
     printf("  --chunks N            max number of chunks to process (default: %d, -1 = all)\n", params.n_chunks);
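A note on the `-bf` handler added above: it slurps the entire file into `params.prompt` via the `seekg`/`tellg` idiom, but does not check whether the final `read()` succeeded. A minimal self-contained sketch of the same idiom with that check added (the helper name and file name here are illustrative, not part of the patch):

```cpp
#include <cstdio>
#include <fstream>
#include <string>

// Hypothetical helper: read an entire binary file into a std::string.
static bool read_binary_file(const char * path, std::string & out) {
    std::ifstream file(path, std::ios::binary);
    if (!file) {
        return false;
    }
    file.seekg(0, std::ios::end);
    const size_t size = file.tellg(); // file size in bytes
    file.seekg(0, std::ios::beg);
    out.resize(size);
    file.read(&out[0], size);
    return bool(file); // false if the read came up short
}

int main() {
    std::string data;
    if (!read_binary_file("tasks.bin", data)) {
        fprintf(stderr, "failed to read tasks.bin\n");
        return 1;
    }
    printf("read %zu bytes\n", data.size());
    return 0;
}
```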
diff --git a/common/common.h b/common/common.h
index 013818f0e..aa4ddff45 100644
--- a/common/common.h
+++ b/common/common.h
@@ -103,6 +103,7 @@ struct gpt_params {
     std::string input_suffix = ""; // string to suffix user inputs with
     std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
     std::string logdir = ""; // directory in which to save YAML log files
+    std::string logits_file = ""; // file for saving *all* logits
 
     std::vector<llama_model_kv_override> kv_overrides;
 
@@ -120,6 +121,11 @@ struct gpt_params {
     bool   winogrande = false;     // compute Winogrande score over random tasks from datafile supplied in prompt
     size_t winogrande_tasks = 0;   // number of tasks to use when computing the Winogrande score. If 0, all tasks will be computed
 
+    bool   multiple_choice = false; // compute multiple choice (e.g. TruthfulQA) score over random tasks from datafile supplied in prompt
+    size_t multiple_choice_tasks = 0; // number of tasks to use when computing the multiple choice score. If 0, all tasks will be computed
+
+    bool   kl_divergence = false;  // compute KL-divergence
+
     bool mul_mat_q         = true;  // if true, use mul_mat_q kernels instead of cuBLAS
     bool random_prompt     = false; // do not randomize prompt if none provided
     bool use_color         = false; // use color to distinguish generations and inputs
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 4d995ef78..7a0a8c3db 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -289,6 +289,58 @@ class Model:
         special_vocab = gguf.SpecialVocab(dir_model, load_merges=True)
         special_vocab.add_to_gguf(self.gguf_writer)
 
+    def _set_vocab_qwen(self):
+        dir_model = self.dir_model
+        hparams = self.hparams
+        tokens: list[bytearray] = []
+        toktypes: list[int] = []
+
+        from transformers import AutoTokenizer
+        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
+        vocab_size = hparams["vocab_size"]
+        assert max(tokenizer.get_vocab().values()) < vocab_size
+
+        merges = []
+        vocab = {}
+        mergeable_ranks = tokenizer.mergeable_ranks
+        for token, rank in mergeable_ranks.items():
+            vocab[QwenModel.token_bytes_to_string(token)] = rank
+            if len(token) == 1:
+                continue
+            merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
+            assert len(merged) == 2
+            merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))
+
+        # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
+        added_vocab = tokenizer.special_tokens
+        reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in (vocab | added_vocab).items()}
+
+        for i in range(vocab_size):
+            if i not in reverse_vocab:
+                pad_token = f"[PAD{i}]".encode("utf-8")
+                tokens.append(bytearray(pad_token))
+                toktypes.append(gguf.TokenType.USER_DEFINED)
+            elif reverse_vocab[i] in added_vocab:
+                tokens.append(reverse_vocab[i])
+                toktypes.append(gguf.TokenType.CONTROL)
+            else:
+                tokens.append(reverse_vocab[i])
+                toktypes.append(gguf.TokenType.NORMAL)
+
+        self.gguf_writer.add_tokenizer_model("gpt2")
+        self.gguf_writer.add_token_list(tokens)
+        self.gguf_writer.add_token_types(toktypes)
+
+        special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
+        special_vocab.merges = merges
+        # only add special tokens when they were not already loaded from config.json
+        if len(special_vocab.special_token_ids) == 0:
+            special_vocab._set_special_token("bos", tokenizer.special_tokens["<|endoftext|>"])
+            special_vocab._set_special_token("eos", tokenizer.special_tokens["<|endoftext|>"])
+        # this one is usually not in config.json anyway
+        special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"])
+        special_vocab.add_to_gguf(self.gguf_writer)
+
     def _set_vocab_sentencepiece(self):
         from sentencepiece import SentencePieceProcessor
 
@@ -877,6 +929,13 @@ class PersimmonModel(Model):
 
 
 class StableLMModel(Model):
+    def set_vocab(self):
+        if (self.dir_model / "tokenizer.json").is_file():
+            self._set_vocab_gpt2()
+        else:
+            # StableLM 2 1.6B uses a vocab in a similar format to Qwen's vocab
+            self._set_vocab_qwen()
+
     def set_gguf_parameters(self):
         hparams = self.hparams
         block_count = hparams["num_hidden_layers"]
@@ -922,52 +981,7 @@ class QwenModel(Model):
         return parts
 
     def set_vocab(self):
-        dir_model = self.dir_model
-        hparams = self.hparams
-        tokens: list[bytearray] = []
-        toktypes: list[int] = []
-
-        from transformers import AutoTokenizer
-        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
-        vocab_size = hparams["vocab_size"]
-        assert max(tokenizer.get_vocab().values()) < vocab_size
-
-        merges = []
-        vocab = {}
-        mergeable_ranks = tokenizer.mergeable_ranks
-        for token, rank in mergeable_ranks.items():
-            vocab[self.token_bytes_to_string(token)] = rank
-            if len(token) == 1:
-                continue
-            merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
-            assert len(merged) == 2
-            merges.append(' '.join(map(self.token_bytes_to_string, merged)))
-
-        reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in vocab.items()}
-        added_vocab = tokenizer.special_tokens
-
-        for i in range(vocab_size):
-            if i not in reverse_vocab:
-                pad_token = f"[PAD{i}]".encode("utf-8")
-                tokens.append(bytearray(pad_token))
-                toktypes.append(gguf.TokenType.USER_DEFINED)
-            elif reverse_vocab[i] in added_vocab:
-                tokens.append(reverse_vocab[i])
-                toktypes.append(gguf.TokenType.CONTROL)
-            else:
-                tokens.append(reverse_vocab[i])
-                toktypes.append(gguf.TokenType.NORMAL)
-
-        self.gguf_writer.add_tokenizer_model("gpt2")
-        self.gguf_writer.add_token_list(tokens)
-        self.gguf_writer.add_token_types(toktypes)
-
-        special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
-        special_vocab.merges = merges
-        special_vocab._set_special_token("bos", tokenizer.special_tokens["<|endoftext|>"])
-        special_vocab._set_special_token("eos", tokenizer.special_tokens["<|endoftext|>"])
-        special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"])
-        special_vocab.add_to_gguf(self.gguf_writer)
+        self._set_vocab_qwen()
 
     def set_gguf_parameters(self):
         self.gguf_writer.add_name("Qwen")
diff --git a/convert-lora-to-ggml.py b/convert-lora-to-ggml.py
index 4904bf128..9a9936dec 100755
--- a/convert-lora-to-ggml.py
+++ b/convert-lora-to-ggml.py
@@ -59,7 +59,14 @@ if __name__ == '__main__':
     input_model = os.path.join(sys.argv[1], "adapter_model.bin")
     output_path = os.path.join(sys.argv[1], "ggml-adapter-model.bin")
 
-    model = torch.load(input_model, map_location="cpu")
+    if os.path.exists(input_model):
+        model = torch.load(input_model, map_location="cpu")
+    else:
+        input_model = os.path.join(sys.argv[1], "adapter_model.safetensors")
+        # lazy import load_file only if lora is in safetensors format.
+        from safetensors.torch import load_file
+        model = load_file(input_model, device="cpu")
+
     arch_name = sys.argv[2] if len(sys.argv) == 3 else "llama"
 
     if arch_name not in gguf.MODEL_ARCH_NAMES.values():
diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp
index 11fcbf443..b7e19c5fe 100644
--- a/examples/finetune/finetune.cpp
+++ b/examples/finetune/finetune.cpp
@@ -1800,6 +1800,8 @@ int main(int argc, char ** argv) {
     std::vector<size_t> train_samples_begin;
     std::vector<size_t> train_samples_size;
     printf("%s: tokenize training data from %s\n", __func__, params.common.fn_train_data);
+    printf("%s: sample-start: %s\n", __func__, params.common.sample_start.c_str());
+    printf("%s: include-sample-start: %s\n", __func__, params.common.include_sample_start ? "true" : "false");
     tokenize_file(lctx,
             params.common.fn_train_data,
             params.common.sample_start,
"true" : "false"); tokenize_file(lctx, params.common.fn_train_data, params.common.sample_start, diff --git a/examples/imatrix/imatrix.cpp b/examples/imatrix/imatrix.cpp index 5687476cd..ea06fcdbf 100644 --- a/examples/imatrix/imatrix.cpp +++ b/examples/imatrix/imatrix.cpp @@ -26,6 +26,7 @@ struct StatParams { std::string ofile = "imatrix.dat"; int n_output_frequency = 10; int verbosity = 1; + int keep_every = 0; bool collect_output_weight = false; }; @@ -42,6 +43,9 @@ private: int m_last_call = 0; std::vector m_src1_data; std::vector m_ids; // the expert ids from ggml_mul_mat_id + // + void save_imatrix(const char * file_name) const; + void keep_imatrix(int ncall) const; }; bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data) { @@ -117,6 +121,9 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void * if (m_last_call % m_params.n_output_frequency == 0) { save_imatrix(); } + if (m_params.keep_every > 0 && m_last_call%m_params.keep_every == 0) { + keep_imatrix(m_last_call); + } } } } else { @@ -143,6 +150,9 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void * if (m_last_call % m_params.n_output_frequency == 0) { save_imatrix(); } + if (m_params.keep_every > 0 && m_last_call%m_params.keep_every == 0) { + keep_imatrix(m_last_call); + } } } @@ -150,7 +160,18 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void * } void IMatrixCollector::save_imatrix() const { - const char * fname = m_params.ofile.empty() ? "imatrix.dat" : m_params.ofile.c_str(); + save_imatrix(m_params.ofile.empty() ? "imatrix.dat" : m_params.ofile.c_str()); +} + +void IMatrixCollector::keep_imatrix(int ncall) const { + auto file_name = m_params.ofile; + if (file_name.empty()) file_name = "imatrix.dat"; + file_name += ".at_"; + file_name += std::to_string(ncall); + save_imatrix(file_name.c_str()); +} + +void IMatrixCollector::save_imatrix(const char * fname) const { std::ofstream out(fname, std::ios::binary); int n_entries = m_stats.size(); out.write((const char*)&n_entries, sizeof(n_entries)); @@ -400,6 +421,8 @@ int main(int argc, char ** argv) { sparams.verbosity = std::stoi(argv[++iarg]); } else if (arg == "--no-ppl") { compute_ppl = false; + } else if (arg == "--keep-imatrix") { + sparams.keep_every = std::stoi(argv[++iarg]); } else { args.push_back(argv[iarg]); } diff --git a/examples/llava/MobileVLM-README.md b/examples/llava/MobileVLM-README.md new file mode 100644 index 000000000..c6258eba6 --- /dev/null +++ b/examples/llava/MobileVLM-README.md @@ -0,0 +1,131 @@ +# MobileVLM + +Currently this implementation supports [MobileVLM-v1.7](https://huggingface.co/mtgv/MobileVLM-1.7B) variants. + +for more information, please go to [Meituan-AutoML/MobileVLM](https://github.com/Meituan-AutoML/MobileVLM) + +The implementation is based on llava, and is compatible with llava and mobileVLM. The usage is basically same as llava. + +## Usage +Build with cmake or run `make llava-cli` to build it. + +After building, run: `./llava-cli` to see the usage. For example: + +```sh +./llava-cli -m MobileVLM-1.7B/ggml-model-q4_k.gguf \ + --mmproj MobileVLM-1.7B/mmproj-model-f16.gguf \ + --image path/to/an/image.jpg \ + -p "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: \nWho is the author of this book? Answer the question using a single word or phrase. 
ASSISTANT:" +``` + +## Model conversion + +- Clone `mobileVLM-1.7B` and `clip-vit-large-patch14-336` locally: + +```sh +git clone https://huggingface.co/mtgv/MobileVLM-1.7B + +git clone https://huggingface.co/openai/clip-vit-large-patch14-336 +``` + +2. Use `llava-surgery.py` to split the LLaVA model to LLaMA and multimodel projector constituents: + +```sh +python ./examples/llava/llava-surgery.py -m path/to/MobileVLM-1.7B +``` + +3. Use `convert-image-encoder-to-gguf.py` with `--projector-type ldp` to convert the LLaVA image encoder to GGUF: + +```sh +python ./examples/llava/convert-image-encoder-to-gguf \ + -m path/to/clip-vit-large-patch14-336 \ + --llava-projector path/to/MobileVLM-1.7B/llava.projector \ + --output-dir path/to/MobileVLM-1.7B \ + --projector-type ldp +``` + +4. Use `convert.py` to convert the LLaMA part of LLaVA to GGUF: + +```sh +python ./convert.py path/to/MobileVLM-1.7B +``` + +5. Use `quantize` to convert LLaMA part's DataType from `fp16` to `q4_k` +```sh +./quantize path/to/MobileVLM-1.7B/ggml-model-f16.gguf path/to/MobileVLM-1.7B/ggml-model-q4_k.gguf q4_k_s +``` + +Now both the LLaMA part and the image encoder is in the `MobileVLM-1.7B` directory. + +## Android compile and run +### compile +refer to `examples/llava/android/build_64.sh` +```sh +mkdir examples/llava/android/build_64 +cd examples/llava/android/build_64 +../build_64.sh +``` +### run on Android +refer to `android/adb_run.sh`, modify resources' `name` and `path` + +## some result on Android with `Snapdragon 888` chip +### case 1 +**input** +```sh +/data/local/tmp/llava-cli \ + -m /data/local/tmp/ggml-model-q4_k.gguf \ + --mmproj /data/local/tmp/mmproj-model-f16.gguf \ + -t 4 \ + --image /data/local/tmp/demo.jpg \ + -p "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: \nWho is the author of this book? \nAnswer the question using a single word or phrase. ASSISTANT:" +``` +**output** +```sh +encode_image_with_clip: image encoded in 21148.71 ms by CLIP ( 146.87 ms per image patch) + Susan Wise Bauer +llama_print_timings: load time = 23574.72 ms +llama_print_timings: sample time = 1.24 ms / 6 runs ( 0.21 ms per token, 4850.44 tokens per second) +llama_print_timings: prompt eval time = 12460.15 ms / 246 tokens ( 50.65 ms per token, 19.74 tokens per second) +llama_print_timings: eval time = 424.86 ms / 6 runs ( 70.81 ms per token, 14.12 tokens per second) +llama_print_timings: total time = 34731.93 ms +``` +### case 2 +**input** +```sh +/data/local/tmp/llava-cli \ + -m /data/local/tmp/ggml-model-q4_k.gguf \ + --mmproj /data/local/tmp/mmproj-model-f16.gguf \ + -t 4 \ + --image /data/local/tmp/cat.jpeg \ + -p "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: \nWhat is in the image? ASSISTANT:" +``` + +**output** +```sh +encode_image_with_clip: image encoded in 21149.51 ms by CLIP ( 146.87 ms per image patch) + The image depicts a cat sitting in the grass near some tall green plants. 
+llama_print_timings: load time = 23257.32 ms +llama_print_timings: sample time = 5.25 ms / 18 runs ( 0.29 ms per token, 3430.53 tokens per second) +llama_print_timings: prompt eval time = 11900.73 ms / 232 tokens ( 51.30 ms per token, 19.49 tokens per second) +llama_print_timings: eval time = 1279.03 ms / 18 runs ( 71.06 ms per token, 14.07 tokens per second) +llama_print_timings: total time = 34570.79 ms +``` + +## Minor shortcomings +The `n_patch` of output in `ldp` is 1/4 of the input. In order to implement quickly, we uniformly modified `clip_n_patches` function to a quarter. when counting the time consumption, the calculated time will be 4 times bigger than the real cost. + +## TODO + +- [ ] Support non-CPU backend for the new operators, such as `depthwise`, `hardswish`, `hardsigmoid` +- [ ] Optimize LDP projector performance + + - Optimize the structure definition to avoid unnecessary memory rearrangements, to reduce the use of `ggml_permute_cpy`; + - Optimize operator implementation (ARM CPU/NVIDIA GPU): such as depthwise conv, hardswish, hardsigmoid, etc. +- [ ] run MobileVLM on `Jetson Orin` +- [ ] Support more model variants, such as `MobileVLM-3B`. + + +## contributor +```sh +zhangjidong05, yangyang260, huyiming03, chenxiaotao03 +``` diff --git a/examples/llava/android/adb_run.sh b/examples/llava/android/adb_run.sh new file mode 100755 index 000000000..f73623ae3 --- /dev/null +++ b/examples/llava/android/adb_run.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +model_dir="/Users/cxt/model/llm/mobileVLM/MobileVLM-1.7B_processed" +projector_name="mmproj-model-f16.gguf" +llama_name="ggml-model-q4_k.gguf" +img_dir="/Users/cxt/model/llm" +img_name="demo.jpg" +prompt="A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: \nWho is the author of this book? \nAnswer the question using a single word or phrase. ASSISTANT:" +# img_name="cat.jpeg" +# prompt="A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: \nWhat is in the image? ASSISTANT:" + +program_dir="build_64/bin" +binName="llava-cli" +n_threads=4 + + +deviceDir="/data/local/tmp" +saveDir="output" +if [ ! -d ${saveDir} ]; then + mkdir ${saveDir} +fi + + +function android_run() { + # # copy resource into device + # adb push ${model_dir}/${projector_name} ${deviceDir}/${projector_name} + # adb push ${model_dir}/${llama_name} ${deviceDir}/${llama_name} + adb push ${img_dir}/${img_name} ${deviceDir}/${img_name} + # copy program into device + adb push ${program_dir}/${binName} ${deviceDir}/${binName} + adb shell "chmod 0777 ${deviceDir}/${binName}" + + # run + adb shell "echo cd ${deviceDir} ${deviceDir}/${binName} \ + -m ${deviceDir}/${llama_name} \ + --mmproj ${deviceDir}/${projector_name} \ + -t ${n_threads} \ + --image ${deviceDir}/${img_name} \ + -p \"${prompt}\" \ + > ${deviceDir}/${modelName}_${projector_name}_${n_threads}_${img_name}.txt" + adb shell "cd ${deviceDir}; pwd; ${deviceDir}/${binName} \ + -m ${deviceDir}/${llama_name} \ + --mmproj ${deviceDir}/${projector_name} \ + -t ${n_threads} \ + --image ${deviceDir}/${img_name} \ + -p \"${prompt}\" \ + >> ${deviceDir}/${modelName}_${projector_name}_${n_threads}_${img_name}.txt 2>&1" + adb pull ${deviceDir}/${modelName}_${projector_name}_${n_threads}_${img_name}.txt ${saveDir} +} + +android_run + +echo "android_run is Done!" 
diff --git a/examples/llava/android/build_64.sh b/examples/llava/android/build_64.sh
new file mode 100755
index 000000000..71b6fd3f7
--- /dev/null
+++ b/examples/llava/android/build_64.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+cmake ../../../../ \
+-DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake \
+-DCMAKE_BUILD_TYPE=Release \
+-DANDROID_ABI="arm64-v8a" \
+-DANDROID_PLATFORM=android-23 $1
+
+make -j4
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index 2ae8853d3..6161fd858 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include <sstream>
 #include "clip.h"
 #include "ggml.h"
@@ -67,6 +68,7 @@ static std::string format(const char * fmt, ...) {
 #define KEY_PATCH_SIZE "clip.vision.patch_size"
 #define KEY_IMAGE_MEAN "clip.vision.image_mean"
 #define KEY_IMAGE_STD "clip.vision.image_std"
+#define KEY_PROJ_TYPE "clip.projector_type"
 
 //
 // tensor name constants
@@ -89,6 +91,21 @@ static std::string format(const char * fmt, ...) {
 #define TN_TEXT_PROJ "text_projection.weight"
 #define TN_VIS_PROJ "visual_projection.weight"
 #define TN_LLAVA_PROJ "mm.%d.%s"
+#define TN_MVLM_PROJ_MLP "mm.model.mlp.%d.%s"
+#define TN_MVLM_PROJ_BLOCK "mm.model.mb_block.%d.block.%d.%s"
+
+
+enum projector_type {
+    PROJECTOR_TYPE_MLP,
+    PROJECTOR_TYPE_LDP,
+    PROJECTOR_TYPE_UNKNOWN,
+};
+
+static std::map<projector_type, std::string> PROJECTOR_TYPE_NAMES = {
+    { PROJECTOR_TYPE_MLP, "mlp" },
+    { PROJECTOR_TYPE_LDP, "ldp" },
+};
+
 
 //
 // utilities to get data from a gguf file
@@ -129,6 +146,91 @@ static std::string get_ftype(int ftype) {
     return ggml_type_name(static_cast<ggml_type>(ftype));
 }
 
+static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) {
+    switch (type) {
+        case GGUF_TYPE_UINT8:   return std::to_string(((const uint8_t  *)data)[i]);
+        case GGUF_TYPE_INT8:    return std::to_string(((const int8_t   *)data)[i]);
+        case GGUF_TYPE_UINT16:  return std::to_string(((const uint16_t *)data)[i]);
+        case GGUF_TYPE_INT16:   return std::to_string(((const int16_t  *)data)[i]);
+        case GGUF_TYPE_UINT32:  return std::to_string(((const uint32_t *)data)[i]);
+        case GGUF_TYPE_INT32:   return std::to_string(((const int32_t  *)data)[i]);
+        case GGUF_TYPE_UINT64:  return std::to_string(((const uint64_t *)data)[i]);
+        case GGUF_TYPE_INT64:   return std::to_string(((const int64_t  *)data)[i]);
+        case GGUF_TYPE_FLOAT32: return std::to_string(((const float    *)data)[i]);
+        case GGUF_TYPE_FLOAT64: return std::to_string(((const double   *)data)[i]);
+        case GGUF_TYPE_BOOL:    return ((const bool *)data)[i] ? "true" : "false";
+        default:                return format("unknown type %d", type);
+    }
+}
+
+
+static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
+    std::string result;
+    for (size_t pos = 0; ; pos += search.length()) {
+        auto new_pos = s.find(search, pos);
+        if (new_pos == std::string::npos) {
+            result += s.substr(pos, s.size() - pos);
+            break;
+        }
+        result += s.substr(pos, new_pos - pos) + replace;
+        pos = new_pos;
+    }
+    s = std::move(result);
+}
+
+static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
+    const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
+
+    switch (type) {
+        case GGUF_TYPE_STRING:
+            return gguf_get_val_str(ctx_gguf, i);
+        case GGUF_TYPE_ARRAY:
+            {
+                const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
+                int arr_n = gguf_get_arr_n(ctx_gguf, i);
+                const void * data = gguf_get_arr_data(ctx_gguf, i);
+                std::stringstream ss;
+                ss << "[";
+                for (int j = 0; j < arr_n; j++) {
+                    if (arr_type == GGUF_TYPE_STRING) {
+                        std::string val = gguf_get_arr_str(ctx_gguf, i, j);
+                        // escape quotes
+                        replace_all(val, "\\", "\\\\");
+                        replace_all(val, "\"", "\\\"");
+                        ss << '"' << val << '"';
+                    } else if (arr_type == GGUF_TYPE_ARRAY) {
+                        ss << "???";
+                    } else {
+                        ss << gguf_data_to_str(arr_type, data, j);
+                    }
+                    if (j < arr_n - 1) {
+                        ss << ", ";
+                    }
+                }
+                ss << "]";
+                return ss.str();
+            }
+        default:
+            return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0);
+    }
+}
+
+static void print_tensor_info(const ggml_tensor* tensor, const char* prefix = "") {
+    size_t tensor_size = ggml_nbytes(tensor);
+    printf("%s: n_dims = %d, name = %s, tensor_size=%zu, shape:[%d, %d, %d, %d], type: %d\n",
+           prefix, ggml_n_dims(tensor), tensor->name, tensor_size,
+           tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], tensor->type);
+}
+
+static projector_type clip_projector_type_from_string(const std::string & name) {
+    for (const auto & kv : PROJECTOR_TYPE_NAMES) { // NOLINT
+        if (kv.second == name) {
+            return kv.first;
+        }
+    }
+    return PROJECTOR_TYPE_UNKNOWN;
+}
+
 //
 // image data
 //
@@ -205,6 +307,32 @@ struct clip_vision_model {
     struct ggml_tensor * mm_0_b;
     struct ggml_tensor * mm_2_w;
     struct ggml_tensor * mm_2_b;
+
+    // MobileVLM projection
+    struct ggml_tensor * mm_model_mlp_1_w;
+    struct ggml_tensor * mm_model_mlp_1_b;
+    struct ggml_tensor * mm_model_mlp_3_w;
+    struct ggml_tensor * mm_model_mlp_3_b;
+    struct ggml_tensor * mm_model_block_1_block_0_0_w;
+    struct ggml_tensor * mm_model_block_1_block_0_1_w;
+    struct ggml_tensor * mm_model_block_1_block_0_1_b;
+    struct ggml_tensor * mm_model_block_1_block_1_fc1_w;
+    struct ggml_tensor * mm_model_block_1_block_1_fc1_b;
+    struct ggml_tensor * mm_model_block_1_block_1_fc2_w;
+    struct ggml_tensor * mm_model_block_1_block_1_fc2_b;
+    struct ggml_tensor * mm_model_block_1_block_2_0_w;
+    struct ggml_tensor * mm_model_block_1_block_2_1_w;
+    struct ggml_tensor * mm_model_block_1_block_2_1_b;
+    struct ggml_tensor * mm_model_block_2_block_0_0_w;
+    struct ggml_tensor * mm_model_block_2_block_0_1_w;
+    struct ggml_tensor * mm_model_block_2_block_0_1_b;
+    struct ggml_tensor * mm_model_block_2_block_1_fc1_w;
+    struct ggml_tensor * mm_model_block_2_block_1_fc1_b;
+    struct ggml_tensor * mm_model_block_2_block_1_fc2_w;
+    struct ggml_tensor * mm_model_block_2_block_1_fc2_b;
+    struct ggml_tensor * mm_model_block_2_block_2_0_w;
+    struct ggml_tensor * mm_model_block_2_block_2_1_w;
+    struct ggml_tensor * mm_model_block_2_block_2_1_b;
 };
 
 struct clip_ctx {
@@ -213,6 +341,7 @@ struct clip_ctx {
     bool has_llava_projector = false;
 
     struct clip_vision_model vision_model;
+    projector_type proj_type = PROJECTOR_TYPE_MLP;
 
     float image_mean[3];
     float image_std[3];
@@ -430,16 +559,135 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
         free(patches_data);
     }
 
+    // shape [1, 576, 1024]
+    // ne is whcn, ne = [1024, 576, 1, 1]
     embeddings = ggml_get_rows(ctx0, embeddings, patches);
 
-    // mm projection 0
-    embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
-    embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
+    // print_tensor_info(embeddings, "embeddings");
 
-    embeddings = ggml_gelu(ctx0, embeddings);
+    // llava projector
+    if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
+        embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
+        embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
 
-    embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
-    embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
+        embeddings = ggml_gelu(ctx0, embeddings);
+
+        embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
+        embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
+    }
+    else if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
+        // MobileVLM projector
+        int n_patch = 24;
+        struct ggml_tensor * mlp_1 = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, embeddings);
+        mlp_1 = ggml_add(ctx0, mlp_1, model.mm_model_mlp_1_b);
+        mlp_1 = ggml_gelu(ctx0, mlp_1);
+        struct ggml_tensor * mlp_3 = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, mlp_1);
+        mlp_3 = ggml_add(ctx0, mlp_3, model.mm_model_mlp_3_b);
+        // mlp_3 shape = [1, 576, 2048], ne = [2048, 576, 1, 1]
+
+        // block 1
+        struct ggml_tensor * block_1 = nullptr;
+        {
+            // transpose from [1, 576, 2048] --> [1, 2048, 576] --> [1, 2048, 24, 24]
+            mlp_3 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_3, 1, 0, 2, 3));
+            mlp_3 = ggml_reshape_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]);
+            // stride = 1, padding = 1, bias is nullptr
+            block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, nullptr, 1, 1, 1, 1, 1, 1);
+
+            // layer norm
+            // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
+            block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
+            // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
+            block_1 = ggml_norm(ctx0, block_1, eps);
+            block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_0_1_w), model.mm_model_block_1_block_0_1_b);
+            block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
+
+            // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
+            // hardswish
+            struct ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);
+
+            block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
+            // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
+            // pointwise conv
+            block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
+            block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc1_w, block_1);
+            block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc1_b);
+            block_1 = ggml_relu(ctx0, block_1);
+            block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc2_w, block_1);
+            block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc2_b);
+            block_1 = ggml_hardsigmoid(ctx0, block_1);
+            // block_1_hw shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1], block_1 shape = [1, 2048], ne = [2048, 1, 1, 1]
+            block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
+            block_1 = ggml_mul(ctx0, block_1_hw, block_1);
+
+            int w = block_1->ne[0], h = block_1->ne[1];
+            block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
+            block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));
+
+            // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1]
+            block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_2_0_w, block_1);
+            block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);
+
+            // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
+            block_1 = ggml_norm(ctx0, block_1, eps);
+            block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_2_1_w), model.mm_model_block_1_block_2_1_b);
+            block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
+            // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
+            // residual
+            block_1 = ggml_add(ctx0, mlp_3, block_1);
+        }
+
+        // block_2
+        {
+            // stride = 2
+            block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_2_block_0_0_w, block_1, nullptr, 2, 2, 1, 1, 1, 1);
+
+            // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
+            // layer norm
+            block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
+            // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
+            block_1 = ggml_norm(ctx0, block_1, eps);
+            block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_0_1_w), model.mm_model_block_2_block_0_1_b);
+            block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
+            // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
+            // hardswish
+            struct ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);
+
+            // not sure the parameters are right for globalAvgPooling
+            block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
+            // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
+            // pointwise conv
+            block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
+            block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc1_w, block_1);
+            block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc1_b);
+            block_1 = ggml_relu(ctx0, block_1);
+            block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc2_w, block_1);
+            block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc2_b);
+            block_1 = ggml_hardsigmoid(ctx0, block_1);
+
+            // block_1_hw shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1], block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
+            block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
+            block_1 = ggml_mul(ctx0, block_1_hw, block_1);
+
+            int w = block_1->ne[0], h = block_1->ne[1];
+            block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
+            block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));
+            // block_1 shape = [1, 12*12, 2048], ne = [12*12, 2048, 1]
+            block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_2_0_w, block_1);
+            block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);
+
+
+            // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
+            block_1 = ggml_norm(ctx0, block_1, eps);
+            block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_2_1_w), model.mm_model_block_2_block_2_1_b);
+            block_1 = ggml_reshape_3d(ctx0, block_1, block_1->ne[0], block_1->ne[1] * block_1->ne[2], block_1->ne[3]);
+            // block_1 shape = [1, 144, 2048], ne = [2048, 144, 1]
+        }
+        embeddings = block_1;
+    }
+    else {
+        GGML_ASSERT(false);
+    }
 }
 
 // build the graph
@@ -485,16 +733,55 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
             printf("\n");
         }
         const int n_tensors = gguf_get_n_tensors(ctx);
+
         // kv
-        if (verbosity >= 3) {
-            const int n_kv = gguf_get_n_kv(ctx);
+        const int n_kv = gguf_get_n_kv(ctx);
+        printf("%s: loaded meta data with %d key-value pairs and %d tensors from %s\n",
+            __func__, n_kv, n_tensors, fname);
+        {
+            std::map<enum ggml_type, uint32_t> n_type;
 
-            for (int i = 0; i < n_kv; ++i) {
-                const char * key = gguf_get_key(ctx, i);
+            uint32_t n_type_max = 0;
+            enum ggml_type type_max = GGML_TYPE_F32;
 
-                printf("%s: kv[%d]: key = %s\n", __func__, i, key);
+            for (int i = 0; i < n_tensors; i++) {
+                enum ggml_type type = gguf_get_tensor_type(ctx, i);
+
+                n_type[type]++;
+
+                if (n_type_max < n_type[type]) {
+                    n_type_max = n_type[type];
+                    type_max   = type;
+                }
+            }
+
+            printf("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
+            for (int i = 0; i < n_kv; i++) {
+                const char * name           = gguf_get_key(ctx, i);
+                const enum gguf_type type   = gguf_get_kv_type(ctx, i);
+                const std::string type_name =
+                    type == GGUF_TYPE_ARRAY
+                    ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(ctx, i)), gguf_get_arr_n(ctx, i))
+                    : gguf_type_name(type);
+
+                std::string value          = gguf_kv_to_str(ctx, i);
+                const size_t MAX_VALUE_LEN = 40;
+                if (value.size() > MAX_VALUE_LEN) {
+                    value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
+                }
+                replace_all(value, "\n", "\\n");
+
+                printf("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
+            }
+
+            // print type counts
+            for (auto & kv : n_type) {
+                if (kv.second == 0) {
+                    continue;
+                }
+
+                printf("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second);
             }
-            printf("\n");
         }
 
         // data
@@ -503,20 +790,35 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
         for (int i = 0; i < n_tensors; ++i) {
             const char * name = gguf_get_tensor_name(ctx, i);
             const size_t offset = gguf_get_tensor_offset(ctx, i);
+            enum ggml_type type = gguf_get_tensor_type(ctx, i);
             struct ggml_tensor * cur = ggml_get_tensor(meta, name);
             size_t tensor_size = ggml_nbytes(cur);
             buffer_size += tensor_size;
             if (verbosity >= 3) {
-                printf("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu\n", __func__, i,
-                       ggml_n_dims(cur), cur->name, tensor_size, offset);
+                printf("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%d, %d, %d, %d], type: %d\n", __func__, i,
+                       ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], type);
             }
         }
     }
+
+    buffer_size += n_tensors * 128 /* CLIP PADDING */;
 
     clip_ctx * new_clip = new clip_ctx;
 
+    // update projector type
+    {
+        int idx = gguf_find_key(ctx, KEY_PROJ_TYPE);
+        if (idx != -1) {
+            const std::string proj_type = gguf_get_val_str(ctx, idx);
+            new_clip->proj_type = clip_projector_type_from_string(proj_type);
+        }
+        else {
+            new_clip->proj_type = PROJECTOR_TYPE_MLP;
+        }
+    }
+
 #ifdef GGML_USE_CUBLAS
     new_clip->backend = ggml_backend_cuda_init(0);
     printf("%s: CLIP using CUDA backend\n", __func__);
@@ -661,10 +963,45 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
         vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v"));
         vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight"));
         vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias"));
-        vision_model.mm_0_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "weight"));
-        vision_model.mm_0_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "bias"));
-        vision_model.mm_2_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "weight"));
-        vision_model.mm_2_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "bias"));
+
+        // LLaVA projection
+        if (new_clip->proj_type == PROJECTOR_TYPE_MLP) {
+            vision_model.mm_0_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "weight"));
+            vision_model.mm_0_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "bias"));
+            vision_model.mm_2_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "weight"));
+            vision_model.mm_2_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "bias"));
+        }
+        else if (new_clip->proj_type == PROJECTOR_TYPE_LDP) {
+            // MobileVLM projection
+            vision_model.mm_model_mlp_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "weight"));
+            vision_model.mm_model_mlp_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "bias"));
+            vision_model.mm_model_mlp_3_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 3, "weight"));
+            vision_model.mm_model_mlp_3_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 3, "bias"));
+            vision_model.mm_model_block_1_block_0_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight"));
+            vision_model.mm_model_block_1_block_0_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight"));
+            vision_model.mm_model_block_1_block_0_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias"));
+            vision_model.mm_model_block_1_block_1_fc1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight"));
+            vision_model.mm_model_block_1_block_1_fc1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias"));
+            vision_model.mm_model_block_1_block_1_fc2_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight"));
+            vision_model.mm_model_block_1_block_1_fc2_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias"));
+            vision_model.mm_model_block_1_block_2_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight"));
+            vision_model.mm_model_block_1_block_2_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight"));
+            vision_model.mm_model_block_1_block_2_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias"));
+            vision_model.mm_model_block_2_block_0_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight"));
+            vision_model.mm_model_block_2_block_0_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight"));
+            vision_model.mm_model_block_2_block_0_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias"));
+            vision_model.mm_model_block_2_block_1_fc1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight"));
+            vision_model.mm_model_block_2_block_1_fc1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias"));
+            vision_model.mm_model_block_2_block_1_fc2_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight"));
+            vision_model.mm_model_block_2_block_1_fc2_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias"));
+            vision_model.mm_model_block_2_block_2_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
+            vision_model.mm_model_block_2_block_2_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
+            vision_model.mm_model_block_2_block_2_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
+        }
+        else {
+            std::string proj_type = PROJECTOR_TYPE_NAMES[new_clip->proj_type];
+            throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str()));
+        }
 
         vision_model.layers.resize(hparams.n_layer);
         for (int il = 0; il < hparams.n_layer; ++il) {
@@ -1100,13 +1437,25 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
 }
 
 int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
-    return ctx->vision_model.mm_2_b->ne[0];
+    if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
+        return ctx->vision_model.mm_model_block_1_block_2_1_b->ne[0];
+    }
+    else if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
+        return ctx->vision_model.mm_2_b->ne[0];
+    }
+    else {
+        std::string proj_type = PROJECTOR_TYPE_NAMES[ctx->proj_type];
+        throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str()));
+    }
 }
 
 int clip_n_patches(const struct clip_ctx * ctx) {
     auto & params = ctx->vision_model.hparams;
-
-    return (params.image_size / params.patch_size) * (params.image_size / params.patch_size);
+    int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);
+    if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
+        n_patches /= 4;
+    }
+    return n_patches;
 }
 
 size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
diff --git a/examples/llava/convert-image-encoder-to-gguf.py b/examples/llava/convert-image-encoder-to-gguf.py
index 03688e0ea..f5a3c9b46 100644
--- a/examples/llava/convert-image-encoder-to-gguf.py
+++ b/examples/llava/convert-image-encoder-to-gguf.py
@@ -81,6 +81,7 @@ ap.add_argument("--vision-only", action="store_true", required=False,
 ap.add_argument("--clip_model_is_vision", action="store_true", required=False,
                 help="The clip model is a pure vision model (ShareGPT4V vision extract for example)")
 ap.add_argument("--llava-projector", help="Path to llava.projector file. If specified, save an image encoder for LLaVA models.")
+ap.add_argument("--projector-type", help="Type of projector. Possible values: mlp, ldp", choices=["mlp", "ldp"], default="mlp")
 ap.add_argument("--image-mean", nargs=3, type=float, required=False, help="Override image mean values")
 ap.add_argument("--image-std", nargs=3, type=float, required=False, help="Override image std values")
 ap.add_argument("-o", "--output-dir", help="Directory to save GGUF files. Default is the original model directory", default=None)
@@ -174,6 +175,8 @@ elif args.vision_only and not has_llava_projector:
     fout.add_description("vision-only CLIP model")
 elif has_llava_projector:
     fout.add_description("image encoder for LLaVA")
+    # add projector type
+    fout.add_string("clip.projector_type", args.projector_type)
 else:
     fout.add_description("two-tower CLIP model")
 
@@ -218,7 +221,8 @@ if has_llava_projector:
     projector = torch.load(args.llava_projector)
     for name, data in projector.items():
         name = get_tensor_name(name)
-        if data.ndim == 2:
+        # pw and dw conv ndim==4
+        if data.ndim == 2 or data.ndim == 4:
             data = data.squeeze().numpy().astype(np.float16)
         else:
             data = data.squeeze().numpy().astype(np.float32)
diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp
index da4d1c48b..0abd00afd 100644
--- a/examples/perplexity/perplexity.cpp
+++ b/examples/perplexity/perplexity.cpp
@@ -113,6 +113,43 @@ static results_log_softmax log_softmax(int n_vocab, const float * logits, int to
     return {logits[tok] - max_logit - log(sum_exp), logits[tok], expf(logits[tok] - max_logit) / (float) sum_exp};
 }
 
+static inline int nearest_int(float fval) {
+    //assert(fval <= 4194303.f);
+    float val = fval + 12582912.f;
+    int i; memcpy(&i, &val, sizeof(int));
+    return (i & 0x007fffff) - 0x00400000;
+}
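An aside on `nearest_int()` above, since the magic constant is easy to misread: 12582912.0f is 1.5 * 2^23. For |fval| < 2^22, `fval + 12582912.0f` lands in [2^23, 2^24), where the float mantissa step is exactly 1, so the addition itself performs round-to-nearest (ties to even); the integer result is then read back out of the low 23 mantissa bits, offset by 0x400000. A self-contained sanity check (illustrative only, not part of the patch):

```cpp
#include <cassert>
#include <cmath>
#include <cstring>

// Same trick as nearest_int() above; intended for |fval| < 2^22.
static int nearest_int(float fval) {
    float val = fval + 12582912.f;        // 1.5 * 2^23: the add rounds to an integer
    int i; memcpy(&i, &val, sizeof(int)); // reinterpret the float bits
    return (i & 0x007fffff) - 0x00400000; // mantissa bits minus the 2^22 offset
}

int main() {
    for (float x : {0.4f, 0.6f, -1.3f, 12345.678f, -99999.5f}) {
        // nearbyintf rounds ties-to-even by default, exactly like the FP addition
        assert(nearest_int(x) == (int) nearbyintf(x));
    }
    return 0;
}
```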
+
+static double log_softmax(int n_vocab, const float * logits, uint16_t * log_prob, int tok) {
+    float max_logit = logits[0];
+    float min_logit = logits[0];
+    for (int i = 1; i < n_vocab; ++i) {
+        max_logit = std::max(max_logit, logits[i]);
+        min_logit = std::min(min_logit, logits[i]);
+    }
+    min_logit = std::max(min_logit, max_logit - 16);
+    double sum_exp = 0.0;
+    for (int i = 0; i < n_vocab; ++i) {
+        sum_exp += expf(logits[i] - max_logit);
+    }
+    const float log_sum_exp = log(sum_exp);
+    const float min_log_prob = min_logit - max_logit - log_sum_exp;
+    const float scale = (max_logit - min_logit)/65535.f;
+    float * d = (float *)log_prob;
+    d[0] = scale;
+    d[1] = min_log_prob;
+    log_prob += 4;
+    if (scale) {
+        const float inv_scale = 1/scale;
+        for (int i = 0; i < n_vocab; ++i) {
+            log_prob[i] = logits[i] > min_logit ? nearest_int(inv_scale*(logits[i] - min_logit)) : 0;
+        }
+    } else {
+        std::memset(log_prob, 0, n_vocab*sizeof(uint16_t));
+    }
+    return max_logit + log_sum_exp - logits[tok];
+}
+
 static void process_logits(
     int n_vocab, const float * logits, const int * tokens, int n_token, std::vector<std::thread> & workers,
     double & nll, double & nll2, float * logit_history, float * prob_history
@@ -148,6 +185,114 @@ static void process_logits(
     }
 }
 
+static void process_logits(std::ostream& out, int n_vocab, const float * logits, const int * tokens, int n_token,
+        std::vector<std::thread> & workers, std::vector<uint16_t> & log_probs, double & nll, double & nll2) {
+    std::mutex mutex;
+    const int nv = 2*((n_vocab + 1)/2) + 4;
+    int counter = 0;
+    auto compute = [&mutex, &counter, &log_probs, &nll, &nll2, n_vocab, logits, tokens, n_token, nv] () {
+        double local_nll  = 0;
+        double local_nll2 = 0;
+        while (true) {
+            std::unique_lock<std::mutex> lock(mutex);
+            int i = counter++;
+            if (i >= n_token) {
+                nll += local_nll; nll2 += local_nll2;
+                break;
+            }
+            lock.unlock();
+            const double v = log_softmax(n_vocab, logits + i*n_vocab, log_probs.data() + i*nv, tokens[i+1]);
+            local_nll  += v;
+            local_nll2 += v*v;
+        }
+    };
+    for (auto & w : workers) {
+        w = std::thread(compute);
+    }
+    compute();
+    for (auto & w : workers) {
+        w.join();
+    }
+    out.write((const char *)log_probs.data(), n_token*nv*sizeof(uint16_t));
+}
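Since the on-disk format produced by `process_logits(std::ostream&, ...)` above is only implicit in the code, here is a hedged decoding sketch. Each token position occupies `nv = 2*((n_vocab + 1)/2) + 4` uint16 slots: the first 4 slots hold two floats (`scale` and `min_log_prob`), the remaining slots hold quantized codes, and a code `q` decodes to the log-probability `scale*q + min_log_prob` (the function name here is illustrative):

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

// Decode one position's quantized log-probs, mirroring the writer above (sketch).
static std::vector<float> decode_log_probs(const uint16_t * rec, int n_vocab) {
    float scale, min_log_prob;
    memcpy(&scale,        rec + 0, sizeof(float)); // first 2 uint16 slots
    memcpy(&min_log_prob, rec + 2, sizeof(float)); // next 2 uint16 slots
    rec += 4;
    std::vector<float> log_probs(n_vocab);
    for (int i = 0; i < n_vocab; ++i) {
        log_probs[i] = scale * rec[i] + min_log_prob;
    }
    return log_probs;
}
```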
+
+struct kl_divergence_result {
+    double sum_nll  = 0;
+    double sum_nll2 = 0;
+    double sum_kld  = 0;
+    double sum_kld2 = 0;
+    double sum_nll_diff  = 0;
+    double sum_nll_diff2 = 0;
+    size_t count = 0;
+};
+
+static void log_softmax(int n_vocab, const float * logits, const uint16_t * base_log_prob, int tok, kl_divergence_result & kld) {
+    float max_logit = logits[0];
+    for (int i = 1; i < n_vocab; ++i) {
+        max_logit = std::max(max_logit, logits[i]);
+    }
+    double sum_exp = 0.0;
+    for (int i = 0; i < n_vocab; ++i) {
+        sum_exp += expf(logits[i] - max_logit);
+    }
+    const float log_sum_exp = log(sum_exp);
+    const float * d = (const float *)base_log_prob;
+    const float scale = d[0];
+    const float min_log_prob = d[1];
+    base_log_prob += 4;
+    float nll = max_logit + log_sum_exp - logits[tok];
+    kld.sum_nll  += nll;
+    kld.sum_nll2 += nll*nll;
+    nll += (scale*base_log_prob[tok] + min_log_prob);
+    kld.sum_nll_diff  += nll;
+    kld.sum_nll_diff2 += nll*nll;
+    max_logit += log_sum_exp;
+    double sum = 0;
+    for (int i = 0; i < n_vocab; ++i) {
+        const float p_log_base = scale*base_log_prob[i] + min_log_prob;
+        if (p_log_base > -16.f) {
+            const float p_base = expf(p_log_base);
+            sum += p_base * (p_log_base - logits[i] + max_logit);
+        }
+    }
+    kld.sum_kld  += sum;
+    kld.sum_kld2 += sum*sum;
+    ++kld.count;
+}
+
+static void process_logits(int n_vocab, const float * logits, const int * tokens, int n_token,
+        std::vector<std::thread> & workers, const std::vector<uint16_t> & base_log_probs, kl_divergence_result & kld) {
+    std::mutex mutex;
+    const int nv = 2*((n_vocab + 1)/2) + 4;
+    int counter = 0;
+    auto compute = [&mutex, &counter, &base_log_probs, &kld, n_vocab, logits, tokens, n_token, nv] () {
+        kl_divergence_result local_kld;
+        while (true) {
+            std::unique_lock<std::mutex> lock(mutex);
+            int i = counter++;
+            if (i >= n_token) {
+                kld.sum_nll  += local_kld.sum_nll;
+                kld.sum_nll2 += local_kld.sum_nll2;
+                kld.sum_kld  += local_kld.sum_kld;
+                kld.sum_kld2 += local_kld.sum_kld2;
+                kld.sum_nll_diff  += local_kld.sum_nll_diff;
+                kld.sum_nll_diff2 += local_kld.sum_nll_diff2;
+                kld.count += local_kld.count;
+                break;
+            }
+            lock.unlock();
+            log_softmax(n_vocab, logits + i*n_vocab, base_log_probs.data() + i*nv, tokens[i+1], local_kld);
+        }
+    };
+    for (auto & w : workers) {
+        w = std::thread(compute);
+    }
+    compute();
+    for (auto & w : workers) {
+        w.join();
+    }
+}
+
 static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params & params) {
     // Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
     // Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
@@ -295,6 +440,18 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
     const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
     const int n_ctx = llama_n_ctx(ctx);
 
+    std::ofstream logits_stream;
+    if (!params.logits_file.empty()) {
+        logits_stream.open(params.logits_file.c_str());
+        if (!logits_stream.is_open()) {
+            fprintf(stderr, "%s: failed to open %s for writing\n", __func__, params.logits_file.c_str());
+            return {};
+        }
+        fprintf(stderr, "%s: saving all logits to %s\n", __func__, params.logits_file.c_str());
+        logits_stream.write("_logits_", 8);
+        logits_stream.write((const char *)&n_ctx, sizeof(n_ctx));
+    }
+
     auto tim1 = std::chrono::high_resolution_clock::now();
     fprintf(stderr, "%s: tokenizing the input ..\n", __func__);
 
@@ -337,6 +494,15 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
 
     std::vector<std::thread> workers(std::thread::hardware_concurrency() - 1);
 
+    std::vector<uint16_t> log_probs;
+    if (!params.logits_file.empty()) {
+        logits_stream.write((const char *)&n_vocab, sizeof(n_vocab));
+        logits_stream.write((const char *)&n_chunk, sizeof(n_chunk));
+        logits_stream.write((const char *)tokens.data(), n_chunk*n_ctx*sizeof(tokens[0]));
+        const int nv = 2*((n_vocab + 1)/2) + 4;
+        log_probs.resize(n_ctx * nv);
+    }
+
     for (int i = 0; i < n_chunk; ++i) {
         const int start =     i * n_ctx;
         const int end   = start + n_ctx;
@@ -399,8 +565,13 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
         // process the entire prompt.
         const int first = n_ctx/2;
         const float * all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx);
-        process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
-                workers, nll, nll2, logit_history.data() + start + first, prob_history.data() + start + first);
+        if (!params.logits_file.empty()) {
+            process_logits(logits_stream, n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
+                    workers, log_probs, nll, nll2);
+        } else {
+            process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
+                    workers, nll, nll2, logit_history.data() + start + first, prob_history.data() + start + first);
+        }
         count += n_ctx - first - 1;
 
         // perplexity is e^(average negative log-likelihood)
@@ -541,14 +712,14 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
     // This is needed as usual for LLaMA models
     const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
 
+    // The tasks should be randomized so the score stabilizes quickly.
+    bool randomize_tasks = true;
+
     // Number of tasks to use when computing the score
     if (params.hellaswag_tasks < hs_task_count) {
         hs_task_count = params.hellaswag_tasks;
     }
 
-    // The tasks should be randomized so the score stabilizes quickly.
-    bool randomize_tasks = true;
-
     // The random seed should not impact the final result if the computation is done over enough tasks, so kept hardcoded for now
     std::mt19937 rng(1);
 
@@ -1032,6 +1203,531 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
     printf("Final Winogrande score(%d tasks): %.4lf +/- %.4lf\n", n_done, 100*p, sigma);
 }
 
+static bool deserialize_string(std::istream& in, std::string& str) {
+    uint32_t size;
+    if (!in.read((char *)&size, sizeof(size)).fail()) {
+        str.resize(size);
+        if (!in.read((char *)str.data(), size).fail()) return true;
+    }
+    return false;
+}
+
+struct multiple_choice_answers {
+    std::vector<std::string> answers;
+    std::vector<int>         labels;
+    bool deserialize(std::istream& in) {
+        uint32_t n;
+        in.read((char *)&n, sizeof(n));
+        if (in.fail() || n > 100) return false; // 100 as max. number of answers should be good enough for any practical purpose
+        answers.resize(n);
+        labels.resize(n);
+        for (auto& a : answers) {
+            if (!deserialize_string(in, a)) return false;
+        }
+        in.read((char *)labels.data(), n*sizeof(int));
+        return !in.fail();
+    }
+};
+
+struct multiple_choice_task {
+    std::string question;        // the question (or context that needs to be continued)
+    multiple_choice_answers mc1; // possible answers (continuations) with a single correct answer
+    multiple_choice_answers mc2; // possible answers (continuations) with multiple correct answers - not handled yet
+    bool deserialize(std::istream& in) {
+        if (!deserialize_string(in, question)) return false;
+        return mc1.deserialize(in) && mc2.deserialize(in);
+    }
+
+    // For evaluation
+    size_t i_batch;         // starting index in the llama_batch
+    size_t common_prefix;   // max number of initial tokens that are the same in all sentences
+    size_t required_tokens; // needed number of tokens to evaluate all answers
+    std::vector<std::vector<llama_token>> seq_tokens;
+    std::vector<float> log_probs;
+};
+
+static bool multiple_choice_prepare_one_task(llama_context * ctx, bool add_bos, multiple_choice_task& task, bool log_error) {
+    if (task.question.empty() || task.mc1.answers.empty()) {
+        if (log_error) {
+            printf("%s: found bad task with empty question and/or answers\n", __func__);
+        }
+        return false;
+    }
+    task.seq_tokens.reserve(task.mc1.answers.size());
+    for (auto& answer : task.mc1.answers) {
+        if (answer.empty()) {
+            if (log_error) {
+                printf("%s: found empty answer\n", __func__);
+            }
+            return false;
+        }
+        task.seq_tokens.emplace_back(::llama_tokenize(ctx, task.question + " " + answer, add_bos));
+    }
+    auto min_len = task.seq_tokens.front().size();
+    for (auto& seq : task.seq_tokens) {
+        min_len = std::min(min_len, seq.size());
+    }
+    task.common_prefix = 0;
+    for (size_t k = 0; k < min_len; ++k) {
+        auto token = task.seq_tokens[0][k];
+        bool all_same = true;
+        for (size_t i = 1; i < task.seq_tokens.size(); ++i) {
+            if (task.seq_tokens[i][k] != token) {
+                all_same = false;
+                break;
+            }
+        }
+        if (!all_same) {
+            break;
+        }
+        ++task.common_prefix;
+    }
+    task.required_tokens = task.common_prefix;
+    for (auto& seq : task.seq_tokens) {
+        task.required_tokens += seq.size() - task.common_prefix;
+    }
+    return true;
+}
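Since the binary task format consumed by `-bf` is only defined implicitly by the `deserialize` methods above, here is a hedged sketch of the matching writer side, i.e. what a task file has to look like: a `uint32` task count, then one `uint32` stream position per task, then each task serialized as length-prefixed strings plus label arrays (helper names here are illustrative):

```cpp
#include <cstdint>
#include <ostream>
#include <string>
#include <vector>

// Write a length-prefixed string: uint32 size, then the raw bytes.
static void serialize_string(std::ostream & out, const std::string & str) {
    uint32_t size = str.size();
    out.write((const char *)&size, sizeof(size));
    out.write(str.data(), size);
}

// Mirror of multiple_choice_answers::deserialize above.
static void serialize_answers(std::ostream & out,
                              const std::vector<std::string> & answers,
                              const std::vector<int> & labels) {
    uint32_t n = answers.size();
    out.write((const char *)&n, sizeof(n));
    for (const auto & a : answers) serialize_string(out, a);
    out.write((const char *)labels.data(), n*sizeof(int));
}
```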
+ +// +// Calculates score for multiple choice tasks with single correct answer from prompt. +// Commonly used LLM evaluation metrics of this type are +// * ARC +// * HellaSwag +// * MMLU +// * TruthfulQA +// +// Validation datasets for these 4 tests can be found at +// https://huggingface.co/datasets/ikawrakow/validation-datasets-for-llama.cpp +// The data for these datasets was extracted from +// git@hf.co:datasets/allenai/ai2_arc +// https://github.com/rowanz/hellaswag/blob/master/data/hellaswag_val.jsonl +// git@hf.co:datasets/Stevross/mmlu +// https://huggingface.co/datasets/truthful_qa +// +static void multiple_choice_score(llama_context * ctx, const gpt_params & params) { + + std::istringstream strstream(params.prompt); + uint32_t n_task; + strstream.read((char *)&n_task, sizeof(n_task)); + if (strstream.fail() || n_task == 0) { + printf("%s: no tasks\n", __func__); + return; + } + printf("%s: there are %u tasks in prompt\n", __func__, n_task); + std::vector<uint32_t> task_pos(n_task); + strstream.read((char *)task_pos.data(), task_pos.size()*sizeof(uint32_t)); + if (strstream.fail()) { + printf("%s: failed to read task positions from prompt\n", __func__); + return; + } + + std::vector<multiple_choice_task> tasks; + if (params.multiple_choice_tasks == 0 || params.multiple_choice_tasks >= (size_t)n_task) { + // Use all tasks + tasks.resize(n_task); + printf("%s: reading tasks", __func__); + int n_dot = std::max(1u, n_task/100); + int i = 0; + for (auto& task : tasks) { + ++i; + if (!task.deserialize(strstream)) { + printf("%s: failed to read task %d of %u\n", __func__, i, n_task); + return; + } + if (i%n_dot == 0) printf("."); + } + printf("done\n"); + } + else { + printf("%s: selecting %zu random tasks from %u tasks available\n", __func__, params.multiple_choice_tasks, n_task); + std::mt19937 rng(1); + std::vector<uint32_t> aux(n_task); + for (uint32_t i = 0; i < n_task; ++i) aux[i] = i; + float scale = 1.f/(1.f + (float)std::mt19937::max()); + tasks.resize(params.multiple_choice_tasks); + for (auto& task : tasks) { + int j = (int)(scale * rng() * aux.size()); + int idx = aux[j]; + aux[j] = aux.back(); + aux.pop_back(); + strstream.seekg(task_pos[idx], std::ios::beg); + if (!task.deserialize(strstream)) { + printf("%s: failed to read task %d at position %u\n", __func__, idx, task_pos[idx]); + return; + } + } + n_task = params.multiple_choice_tasks; + } + + // This is needed as usual for LLaMA models + const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx)); + + printf("%s: preparing task data", __func__); + fflush(stdout); + if (n_task > 500) { + printf("..."); + fflush(stdout); + std::atomic<int> counter(0); + std::atomic<int> n_bad(0); + auto prepare = [&counter, &n_bad, &tasks, ctx, add_bos] () { + int num_tasks = tasks.size(); + int n_bad_local = 0; + while (true) { + int first = counter.fetch_add(K_TOKEN_CHUNK); + if (first >= num_tasks) { + if (n_bad_local > 0) n_bad += n_bad_local; + break; + } + int last = std::min(first + K_TOKEN_CHUNK, num_tasks); + for (int i = first; i < last; ++i) { + if (!multiple_choice_prepare_one_task(ctx, add_bos, tasks[i], false)) ++n_bad_local; + } + } + }; + size_t max_thread = std::thread::hardware_concurrency(); + max_thread = std::min(max_thread, (tasks.size() + K_TOKEN_CHUNK - 1)/K_TOKEN_CHUNK); + std::vector<std::thread> workers(max_thread-1); + for (auto& w : workers) w = std::thread(prepare); + prepare(); + for (auto& w : workers) w.join(); + printf("done\n"); + fflush(stdout); + int nbad = n_bad; + if (nbad > 0) { + printf("%s: found %d malformed tasks\n", __func__, nbad); + return; + } + } else { + int n_dot = std::max(1u, n_task/100); + int i_task = 0; + for (auto& task : tasks) { + ++i_task; + if
(!multiple_choice_prepare_one_task(ctx, add_bos, task, true)) { + return; + } + if (i_task%n_dot == 0) { + printf("."); + fflush(stdout); + } + } + printf("done\n"); + } + + printf("%s : calculating multiple choice score over %zu tasks.\n", __func__, tasks.size()); + + printf("\ntask\tacc_norm\n"); + + const int n_vocab = llama_n_vocab(llama_get_model(ctx)); + const int n_ctx = llama_n_ctx(ctx); + const int n_batch = params.n_batch; + + const int max_tasks_per_batch = 32; + const int max_seq = 4*max_tasks_per_batch; + + llama_batch batch = llama_batch_init(n_ctx, 0, max_seq); + + std::vector<float> tok_logits(n_vocab); + std::vector<float> batch_logits(n_vocab*n_ctx); + + std::vector<std::pair<size_t, llama_token>> eval_pairs; + std::vector<float> eval_results; + std::vector<std::thread> workers(std::thread::hardware_concurrency()); + std::vector<llama_seq_id> batch_indeces; + + int n_done = 0; + int n_correct = 0; + int n_tot_answers = 0; + + for (size_t i0 = 0; i0 < tasks.size(); i0++) { + int n_cur = 0; + + size_t i1 = i0; + size_t i_batch = 0; // this tells us where in `llama_batch` we are currently + + llama_batch_clear(batch); + + // batch as many tasks as possible into the available context + // each task gets a unique sequence id for each of its answers + // the common prefix is shared among the sequences to save tokens + // we extract logits only from the last common token and from all ending tokens of each sequence + int s0 = 0; + while (n_cur + (int) tasks[i1].required_tokens <= n_ctx) { + auto& cur_task = tasks[i1]; + + int num_answers = cur_task.seq_tokens.size(); + if (s0 + num_answers > max_seq) { + break; + } + + if (int(batch_indeces.size()) != num_answers) { + batch_indeces.resize(num_answers); + } + for (int s = 0; s < num_answers; ++s) batch_indeces[s] = s0 + s; + + for (size_t i = 0; i < cur_task.common_prefix; ++i) { + //llama_batch_add(batch, cur_task.seq_tokens[0][i], i, { s0 + 0, s0 + 1, s0 + 2, s0 + 3}, false); + llama_batch_add(batch, cur_task.seq_tokens[0][i], i, batch_indeces, false); + } + batch.logits[batch.n_tokens - 1] = true; // we need logits for the last token of the common prefix + + for (int s = 0; s < int(cur_task.seq_tokens.size()); ++s) { + for (size_t i = cur_task.common_prefix; i < cur_task.seq_tokens[s].size(); ++i) { + llama_batch_add(batch, cur_task.seq_tokens[s][i], i, { s0 + s }, true); + } + } + + s0 += num_answers; + + cur_task.i_batch = i_batch; + i_batch += cur_task.required_tokens; + + n_cur += cur_task.required_tokens; + if (++i1 == tasks.size()) { + break; + } + } + + if (i0 == i1) { + fprintf(stderr, "%s : task %zu does not fit in the context window\n", __func__, i0); + return; + } + + llama_kv_cache_clear(ctx); + + // decode all tasks [i0, i1) + if (!decode_helper(ctx, batch, batch_logits, n_batch, n_vocab)) { + fprintf(stderr, "%s: llama_decode() failed\n", __func__); + return; + } + + // Compute log-probs in parallel + // First we collect all tasks + eval_pairs.clear(); + for (size_t i = i0; i < i1; ++i) { + auto& cur_task = tasks[i]; + size_t li = cur_task.common_prefix; + for (int s = 0; s < int(cur_task.seq_tokens.size()); ++s) { + for (size_t j = cur_task.common_prefix; j < cur_task.seq_tokens[s].size() - 1; j++) { + eval_pairs.push_back(std::make_pair(cur_task.i_batch + li++, cur_task.seq_tokens[s][j + 1])); + } + ++li; + } + } + // Then we do the actual calculation + compute_logprobs(batch_logits.data(), n_vocab, workers, eval_pairs, eval_results); + + size_t ir = 0; + + // compute the logprobs for each ending of the decoded tasks + for (size_t i = i0; i < i1; ++i) { + auto & cur_task = tasks[i]; +
//printf("==== Evaluating <%s> with correct answer ", cur_task.question.c_str()); + //for (int j = 0; j < int(cur_task.mc1.labels.size()); ++j) { + // if (cur_task.mc1.labels[j] == 1) { + // printf("%d", j+1); + // } + //} + //printf("\n common_prefix: %zu\n", cur_task.common_prefix); + + std::memcpy(tok_logits.data(), batch_logits.data() + n_vocab*(cur_task.i_batch + cur_task.common_prefix - 1), n_vocab*sizeof(float)); + + const auto first_probs = softmax(tok_logits); + + cur_task.log_probs.resize(cur_task.seq_tokens.size()); + for (int s = 0; s < int(cur_task.seq_tokens.size()); ++s) { + size_t count = 1; + float log_prob = std::log(first_probs[cur_task.seq_tokens[s][cur_task.common_prefix]]); + for (size_t j = cur_task.common_prefix; j < cur_task.seq_tokens[s].size() - 1; j++) { + //printf(" %zu %g\n", ir, eval_results[ir]); + ++count; + log_prob += eval_results[ir++]; + } + cur_task.log_probs[s] = log_prob / count; + //printf(" Final: %g\n", log_prob / count); + //printf(" <%s> : %g\n", cur_task.mc1.answers[s].c_str(), log_prob/count); + } + + // Find the ending with maximum logprob + size_t logprob_max_idx = 0; + float logprob_max_val = cur_task.log_probs[0]; + for (size_t s = 1; s < cur_task.log_probs.size(); s++) { + if (cur_task.log_probs[s] > logprob_max_val) { + logprob_max_val = cur_task.log_probs[s]; + logprob_max_idx = s; + } + } + + n_tot_answers += cur_task.log_probs.size(); + if (cur_task.mc1.labels[logprob_max_idx] == 1) { + ++n_correct; + } + ++n_done; + + // Print the accumulated accuracy mean x 100 + printf("%d\t%.8lf\n", n_done, 100.*n_correct/n_done); + fflush(stdout); + } + + i0 = i1 - 1; + } + + llama_batch_free(batch); + + if (n_done < 100) return; + + float p = 1.f*n_correct/n_done; + float sigma = sqrt(p*(1-p)/(n_done-1)); + printf("\n Final result: %.4f +/- %.4f\n", 100.f*p, 100.f*sigma); + p = 1.f*n_done/n_tot_answers; + sigma = sqrt(p*(1-p)/(n_done-1)); + printf("Random chance: %.4f +/- %.4f\n", 100.f*p, 100.f*sigma); + + printf("\n"); +} + +static void kl_divergence(llama_context * ctx, const gpt_params & params) { + if (params.logits_file.empty()) { + fprintf(stderr, "%s: you must provide a name of a file containing the log probabilities of the base model\n", __func__); + return; + } + std::ifstream in(params.logits_file.c_str(), std::ios::binary); + if (!in) { + fprintf(stderr, "%s: failed to open %s\n", __func__, params.logits_file.c_str()); + return; + } + { + char check[9]; check[8] = 0; + in.read(check, 8); + if (in.fail() || strncmp("_logits_", check, 8) != 0) { + fprintf(stderr, "%s: %s does not look like a file containing log-probabilities\n", __func__, params.logits_file.c_str()); + return; + } + } + + uint32_t n_ctx; + in.read((char *)&n_ctx, sizeof(n_ctx)); + if (n_ctx > llama_n_ctx(ctx)) { + fprintf(stderr, "%s: %s has been computed with %d, while the current context is %d. 
Increase it with -c and retry\n", + __func__, params.logits_file.c_str(), n_ctx, params.n_ctx); + } + + int n_vocab, n_chunk; + in.read((char *)&n_vocab, sizeof(n_vocab)); + in.read((char *)&n_chunk, sizeof(n_chunk)); + if (in.fail()) { + fprintf(stderr, "%s: failed reading n_vocab, n_chunk from %s\n", __func__, params.logits_file.c_str()); + return; + } + if (n_vocab != llama_n_vocab(llama_get_model(ctx))) { + fprintf(stderr, "%s: inconsistent vocabulary (%d vs %d)\n", __func__, n_vocab, llama_n_vocab(llama_get_model(ctx))); + } + + std::vector<llama_token> tokens(n_ctx * n_chunk); + if (in.read((char *)tokens.data(), tokens.size()*sizeof(tokens[0])).fail()) { + fprintf(stderr, "%s: failed reading evaluation tokens from %s\n", __func__, params.logits_file.c_str()); + return; + } + + const int n_batch = params.n_batch; + const int num_batches = (n_ctx + n_batch - 1)/n_batch; + const int nv = 2*((n_vocab + 1)/2) + 4; + const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx)); + + std::vector<uint16_t> log_probs_uint16(size_t(n_ctx - 1 - n_ctx/2) * nv); + std::vector<float> logits; + if (num_batches > 1) { + logits.reserve(n_ctx * n_vocab); + } + + std::vector<std::thread> workers(std::thread::hardware_concurrency() - 1); + + auto mean_and_uncertainty = [] (double sum, double sum2, size_t count) { + if (count < 1) { + return std::make_pair(0., 0.); + } + double f = sum/count; + double df = sum2/count - f*f; + df = df > 0 && count > 10 ? sqrt(df/(count-1)) : 0.; + return std::make_pair(f, df); + }; + + kl_divergence_result kld; + + for (int i = 0; i < n_chunk; ++i) { + const int start = i * n_ctx; + const int end = start + n_ctx; + + const auto t_start = std::chrono::high_resolution_clock::now(); + + if (in.read((char *)log_probs_uint16.data(), log_probs_uint16.size()*sizeof(uint16_t)).fail()) { + fprintf(stderr, "%s: failed reading log-probs for chunk %d\n", __func__, i); + return; + } + + // clear the KV cache + llama_kv_cache_clear(ctx); + + for (int j = 0; j < num_batches; ++j) { + const int batch_start = start + j * n_batch; + const int batch_size = std::min(end - batch_start, n_batch); + + // save original token and restore it after eval + const auto token_org = tokens[batch_start]; + + // add BOS token for the first batch of each chunk + if (add_bos && j == 0) { + tokens[batch_start] = llama_token_bos(llama_get_model(ctx)); + } + + if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) { + fprintf(stderr, "%s : failed to eval\n", __func__); + return; + } + + // restore the original token in case it was set to BOS + tokens[batch_start] = token_org; + + if (num_batches > 1) { + const auto * batch_logits = llama_get_logits(ctx); + logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab); + } + } + + const auto t_end = std::chrono::high_resolution_clock::now(); + + if (i == 0) { + const float t_total = std::chrono::duration<float>(t_end - t_start).count(); + fprintf(stderr, "%s: %.2f seconds per pass - ETA ", __func__, t_total); + int total_seconds = (int)(t_total * n_chunk); + if (total_seconds >= 60*60) { + fprintf(stderr, "%d hours ", total_seconds / (60*60)); + total_seconds = total_seconds % (60*60); + } + fprintf(stderr, "%.2f minutes\n", total_seconds / 60.0); + + printf("\nchunk PPL ln(PPL(Q)/PPL(base)) KL-Divergence\n"); + } + + const int first = n_ctx/2; + const float * all_logits = num_batches > 1 ?
logits.data() : llama_get_logits(ctx); + process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first, + workers, log_probs_uint16, kld); + + auto ppl = mean_and_uncertainty(kld.sum_nll, kld.sum_nll2, kld.count); + auto log_ppl_ratio = mean_and_uncertainty(kld.sum_nll_diff, kld.sum_nll_diff2, kld.count); + auto kl_div = mean_and_uncertainty(kld.sum_kld, kld.sum_kld2, kld.count); + + printf("%4d %10.4lf %10.5lf ± %10.5f %10.5f ± %10.5lf\n", i+1, exp(ppl.first), + log_ppl_ratio.first, log_ppl_ratio.second, kl_div.first, kl_div.second); + + fflush(stdout); + + logits.clear(); + } + printf("\n"); + +} int main(int argc, char ** argv) { gpt_params params; @@ -1092,6 +1788,10 @@ int main(int argc, char ** argv) { hellaswag_score(ctx, params); } else if (params.winogrande) { winogrande_score(ctx, params); + } else if (params.multiple_choice) { + multiple_choice_score(ctx, params); + } else if (params.kl_divergence) { + kl_divergence(ctx, params); } else { results = perplexity(ctx, params); } diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp index 4171be8b4..5ce5b9a91 100644 --- a/examples/quantize/quantize.cpp +++ b/examples/quantize/quantize.cpp @@ -27,6 +27,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = { { "Q2_K", LLAMA_FTYPE_MOSTLY_Q2_K, " 2.63G, +0.6717 ppl @ LLaMA-v1-7B", }, { "Q2_K_S", LLAMA_FTYPE_MOSTLY_Q2_K_S, " 2.16G, +9.0634 ppl @ LLaMA-v1-7B", }, { "Q3_K", LLAMA_FTYPE_MOSTLY_Q3_K_M, "alias for Q3_K_M" }, + { "Q3_K_XS",LLAMA_FTYPE_MOSTLY_Q3_K_XS,"3-bit extra small quantization" , }, { "Q3_K_S", LLAMA_FTYPE_MOSTLY_Q3_K_S, " 2.75G, +0.5551 ppl @ LLaMA-v1-7B", }, { "Q3_K_M", LLAMA_FTYPE_MOSTLY_Q3_K_M, " 3.07G, +0.2496 ppl @ LLaMA-v1-7B", }, { "Q3_K_L", LLAMA_FTYPE_MOSTLY_Q3_K_L, " 3.35G, +0.1764 ppl @ LLaMA-v1-7B", }, diff --git a/ggml-metal.m b/ggml-metal.m index b4426607d..ff5cce8e5 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -2246,22 +2246,7 @@ static bool ggml_metal_graph_compute( MTLCommandBufferStatus status = [command_buffer status]; if (status != MTLCommandBufferStatusCompleted) { - if (status == MTLCommandBufferStatusError) { - // Check Metal error code - NSError *error = (MTLCommandBufferError) [ctx->command_buffers[i] error]; - int mtl_error_code = [error code]; - if (([error domain] == MTLCommandBufferErrorDomain) && ([error code] == MTLCommandBufferErrorOutOfMemory)) { - GGML_METAL_LOG_INFO("%s: command buffer %d failed with status MTLCommandBufferStatus.error (5) and error code \ -MTLCommandBufferError.outOfMemory (8)\n"); - printf("Metal ran out of memory. Maybe try a smaller context size, or a smaller (more coarsely quantized) model, \ -preferably one under the recommended max working set size, or else fall back to running on CPU only.\n"); - } else { - GGML_METAL_LOG_INFO("%s: command buffer %d failed with status MTLCommandBufferStatus.error (5) and error code %d\n", - __func__, i, mtl_error_code); - } - } else { - GGML_METAL_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, i, status); - } + GGML_METAL_LOG_INFO("%s: command buffer %d failed with status %lu. Check if you ran out of memory.\n", __func__, i, status); return false; } } diff --git a/ggml.c b/ggml.c index 89186974c..77399f357 100644 --- a/ggml.c +++ b/ggml.c @@ -1418,6 +1418,9 @@ inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ?
x[i] : expf(x[i])-1; } inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; } inline static void ggml_vec_leaky_relu_f32 (const int n, float * y, const float * x, const float ns) { for (int i = 0; i < n; ++i) y[i] = ((x[i] > 0.f) ? x[i] : 0.f) + ns * ((x[i] < 0.0f) ? x[i] : 0.f); } +// TODO: optimize performance +inline static void ggml_vec_hardswish_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i] * fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); } +inline static void ggml_vec_hardsigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); } static const float GELU_COEF_A = 0.044715f; static const float GELU_QUICK_COEF = -1.702f; @@ -1776,9 +1779,11 @@ static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = { "GELU", "GELU_QUICK", "SILU", + "HARDSWISH", + "HARDSIGMOID", }; -static_assert(GGML_UNARY_OP_COUNT == 10, "GGML_UNARY_OP_COUNT != 10"); +static_assert(GGML_UNARY_OP_COUNT == 12, "GGML_UNARY_OP_COUNT != 12"); static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN"); @@ -3945,6 +3950,20 @@ struct ggml_tensor * ggml_silu_back( return result; } +// ggml hardswish +struct ggml_tensor * ggml_hardswish( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_unary(ctx, a, GGML_UNARY_OP_HARDSWISH); +} + +// ggml hardsigmoid +struct ggml_tensor * ggml_hardsigmoid( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_unary(ctx, a, GGML_UNARY_OP_HARDSIGMOID); +} + // ggml_norm static struct ggml_tensor * ggml_norm_impl( @@ -5344,6 +5363,33 @@ GGML_API struct ggml_tensor * ggml_conv_transpose_1d( return result; } +// ggml_conv_depthwise +struct ggml_tensor * ggml_conv_depthwise_2d( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + struct ggml_tensor * c, + int s0, + int s1, + int p0, + int p1, + int d0, + int d1) { + + struct ggml_tensor * new_a = ggml_reshape_4d(ctx, a, a->ne[0], a->ne[1], 1, a->ne[2] * a->ne[3]); + struct ggml_tensor * im2col = ggml_im2col(ctx, new_a, + ggml_reshape_4d(ctx, b, b->ne[0], b->ne[1], 1, b->ne[2] * b->ne[3]), + s0, s1, p0, p1, d0, d1, true); // [N * IC, OH, OW, KH * KW] + + struct ggml_tensor * result = + ggml_mul_mat(ctx, + ggml_reshape_4d(ctx, new_a, (new_a->ne[0] * new_a->ne[1]), new_a->ne[2], new_a->ne[3], 1), // [OC,1, KH, KW] => [1, OC, 1, KH * KW] + ggml_reshape_4d(ctx, im2col, im2col->ne[0], im2col->ne[2] * im2col->ne[1], b->ne[2], b->ne[3])); // [N * IC, OH, OW, KH * KW] => [N, IC, OH * OW, KH * KW] + + result = ggml_reshape_4d(ctx, result, im2col->ne[1], im2col->ne[2], b->ne[2], b->ne[3]); // [N, OC, OH, OW] + + return result; +} // ggml_conv_2d // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] @@ -7764,6 +7810,9 @@ static void ggml_compute_forward_acc_f32( bool inplace = (bool) ((int32_t *) dst->op_params)[4]; if (!inplace && (params->type == GGML_TASK_INIT)) { + if (params->ith != 0) { + return; + } // memcpy needs to be synchronized across threads to avoid race conditions. 
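The `if (params->ith != 0) { return; }` early-outs added here and in the following hunks all follow one pattern: with the scheduler change at the end of this diff, every worker thread now enters the INIT phase, so INIT work that must stay serial (a memcpy or memset of dst) is restricted to thread 0. A condensed, self-contained sketch of the idiom (simplified types, not the actual ggml structs):

#include <cstring>

enum task_type { TASK_INIT, TASK_COMPUTE, TASK_FINALIZE };

struct compute_params { task_type type; int ith; int nth; };

static void forward_copy_like(const compute_params * params,
                              void * dst, const void * src, size_t nbytes) {
    if (params->type == TASK_INIT) {
        if (params->ith != 0) {
            return; // all threads reach INIT, but only thread 0 does the serial work
        }
        std::memcpy(dst, src, nbytes);
        return;
    }
    // TASK_COMPUTE would then partition the remaining work across params->nth threads
}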
// => do it in INIT phase memcpy( @@ -9333,6 +9382,87 @@ static void ggml_compute_forward_silu_back( } } + +static void ggml_compute_forward_hardswish_f32( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + struct ggml_tensor * dst) { + assert(params->ith == 0); + assert(ggml_are_same_shape(src0, dst)); + + if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + return; + } + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + + assert(dst->nb[0] == sizeof(float)); + assert(src0->nb[0] == sizeof(float)); + + for (int i = 0; i < n; i++) { + ggml_vec_hardswish_f32(nc, + (float *) ((char *) dst->data + i*( dst->nb[1])), + (float *) ((char *) src0->data + i*(src0->nb[1]))); + } +} +static void ggml_compute_forward_hardswish( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + struct ggml_tensor * dst) { + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_hardswish_f32(params, src0, dst); + } break; + default: + { + GGML_ASSERT(false); + } break; + } +} + +static void ggml_compute_forward_hardsigmoid_f32( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + struct ggml_tensor * dst) { + assert(params->ith == 0); + assert(ggml_are_same_shape(src0, dst)); + + if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + return; + } + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + + assert(dst->nb[0] == sizeof(float)); + assert(src0->nb[0] == sizeof(float)); + + for (int i = 0; i < n; i++) { + ggml_vec_hardsigmoid_f32(nc, + (float *) ((char *) dst->data + i*( dst->nb[1])), + (float *) ((char *) src0->data + i*(src0->nb[1]))); + } +} + +static void ggml_compute_forward_hardsigmoid( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + struct ggml_tensor * dst) { + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_hardsigmoid_f32(params, src0, dst); + } break; + default: + { + GGML_ASSERT(false); + } break; + } +} + + // ggml_compute_forward_norm static void ggml_compute_forward_norm_f32( @@ -9825,11 +9955,30 @@ static void ggml_compute_forward_mul_mat( #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) if (ggml_compute_forward_mul_mat_use_blas(dst)) { - if (params->ith != 0) { - return; - } + const int64_t ne_plane = ne01*ne00; + const int64_t desired_wsize = ne13*ne12*ne_plane*sizeof(float); + UNUSED(desired_wsize); if (params->type == GGML_TASK_INIT) { + if (type != GGML_TYPE_F32) { + assert(params->wsize >= desired_wsize); + // parallelize by src0 rows + for (int64_t i13 = 0; i13 < ne13; i13++) { + for (int64_t i12 = 0; i12 < ne12; i12++) { + // broadcast src0 into src1 across 2nd,3rd dimension + const int64_t i03 = i13/r3; + const int64_t i02 = i12/r2; + + const void * x = (char *) src0->data + i02*nb02 + i03*nb03; + float * const wdata = (float *) params->wdata + i13*ne12*ne_plane + i12*ne_plane; + ggml_to_float_t const to_float = type_traits[type].to_float; + + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + to_float((const char *) x + i01*nb01, wdata + i01*ne00, ne00); + } + } + } + } return; } @@ -9837,9 +9986,14 @@ static void ggml_compute_forward_mul_mat( return; } + // perform sgemm, parallelization controlled by blas lib + if (ith != 0) { + return; + } + + const int64_t tgemm0 = ggml_perf_time_us(); for (int64_t i13 = 0; i13 < ne13; i13++) { for (int64_t i12 = 0; i12 < ne12; i12++) { - // broadcast src0 into src1 across 2nd,3rd dimension const 
int64_t i03 = i13/r3; const int64_t i02 = i12/r2; @@ -9848,17 +10002,7 @@ static void ggml_compute_forward_mul_mat( float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3); if (type != GGML_TYPE_F32) { - float * const wdata = params->wdata; - ggml_to_float_t const to_float = type_traits[type].to_float; - - size_t id = 0; - for (int64_t i01 = 0; i01 < ne01; ++i01) { - to_float((const char *) x + i01*nb01, wdata + id, ne00); - id += ne00; - } - - assert(id*sizeof(float) <= params->wsize); - x = wdata; + x = (float *) params->wdata + i13*ne12*ne_plane + i12*ne_plane; } cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, @@ -9868,6 +10012,7 @@ static void ggml_compute_forward_mul_mat( 0.0f, d, ne01); } } + //printf("cblas_sgemm = %.3f ms, %lld flops\n", (ggml_perf_time_us() - tgemm0)/1000.0, ne13*ne12*ne1*ne01*ne10*2); //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3); @@ -9876,6 +10021,9 @@ static void ggml_compute_forward_mul_mat( #endif if (params->type == GGML_TASK_INIT) { + if (ith != 0) { + return; + } if (src1->type != vec_dot_type) { char * wdata = params->wdata; const size_t row_size = ggml_row_size(vec_dot_type, ne10); @@ -10040,6 +10188,9 @@ static void ggml_compute_forward_mul_mat_id( #define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ne11 + (i1)] if (params->type == GGML_TASK_INIT) { + if (ith != 0) { + return; + } char * wdata = params->wdata; if (src1->type != vec_dot_type) { const size_t row_size = ggml_row_size(vec_dot_type, ne10); @@ -10225,6 +10376,9 @@ static void ggml_compute_forward_out_prod_f32( return; } #endif + if (ith != 0) { + return; + } ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0); return; } @@ -10408,6 +10562,9 @@ static void ggml_compute_forward_out_prod_q_f32( // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST) if (params->type == GGML_TASK_INIT) { + if (ith != 0) { + return; + } ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0); return; } @@ -10592,6 +10749,9 @@ static void ggml_compute_forward_set_f32( bool inplace = (bool) ((int32_t *) dst->op_params)[4]; if (!inplace && (params->type == GGML_TASK_INIT)) { + if (params->ith != 0) { + return; + } // memcpy needs to be synchronized across threads to avoid race conditions. // => do it in INIT phase memcpy( @@ -10916,6 +11076,9 @@ static void ggml_compute_forward_get_rows_back_f32_f16( // ggml_compute_forward_dup_same_cont(params, opt0, dst); if (params->type == GGML_TASK_INIT) { + if (params->ith != 0) { + return; + } memset(dst->data, 0, ggml_nbytes(dst)); } @@ -10950,6 +11113,9 @@ static void ggml_compute_forward_get_rows_back_f32( // ggml_compute_forward_dup_same_cont(params, opt0, dst); if (params->type == GGML_TASK_INIT) { + if (params->ith != 0) { + return; + } memset(dst->data, 0, ggml_nbytes(dst)); } @@ -11087,6 +11253,9 @@ static void ggml_compute_forward_diag_mask_f32( GGML_ASSERT(n_past >= 0); if (!inplace && (params->type == GGML_TASK_INIT)) { + if (ith != 0) { + return; + } // memcpy needs to be synchronized across threads to avoid race conditions. 
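The BLAS path above uses the other half of the same scheme: dequantization of src0 to F32, previously done serially right before the sgemm, now happens in the INIT phase with each thread converting an interleaved subset of rows, and only thread 0 later runs cblas_sgemm on the contiguous f32 planes. A sketch of that row-interleaved conversion, with illustrative names (the real converter comes from type_traits[type].to_float):

#include <cstddef>
#include <cstdint>

typedef void (*to_float_t)(const void * src, float * dst, int64_t n);

// thread `ith` of `nth` dequantizes rows ith, ith + nth, ith + 2*nth, ...
// into one contiguous f32 plane that the single-threaded sgemm consumes later
static void dequant_rows_interleaved(const char * x, float * wdata,
        int64_t ne00, int64_t ne01, size_t nb01,
        int ith, int nth, to_float_t to_float) {
    for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
        to_float(x + i01*nb01, wdata + i01*ne00, ne00);
    }
}

This full-tensor dequantization is also why ggml_graph_plan further down now reserves ggml_type_size(GGML_TYPE_F32)*ggml_nelements(src0) of work memory instead of a single 2D slice.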
// => do it in INIT phase GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); @@ -12057,6 +12226,9 @@ static void ggml_compute_forward_conv_transpose_1d_f16_f32( GGML_ASSERT(nb10 == sizeof(float)); if (params->type == GGML_TASK_INIT) { + if (ith != 0) { + return; + } memset(params->wdata, 0, params->wsize); // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) @@ -12151,6 +12323,9 @@ static void ggml_compute_forward_conv_transpose_1d_f32( GGML_ASSERT(nb10 == sizeof(float)); if (params->type == GGML_TASK_INIT) { + if (ith != 0) { + return; + } memset(params->wdata, 0, params->wsize); // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) @@ -12349,6 +12524,7 @@ static void ggml_compute_forward_im2col( } } + // ggml_compute_forward_conv_transpose_2d static void ggml_compute_forward_conv_transpose_2d( @@ -12374,6 +12550,9 @@ static void ggml_compute_forward_conv_transpose_2d( GGML_ASSERT(nb10 == sizeof(float)); if (params->type == GGML_TASK_INIT) { + if (ith != 0) { + return; + } memset(params->wdata, 0, params->wsize); // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout) @@ -13917,6 +14096,14 @@ static void ggml_compute_forward_unary( { ggml_compute_forward_silu(params, src0, dst); } break; + case GGML_UNARY_OP_HARDSWISH: + { + ggml_compute_forward_hardswish(params, src0, dst); + } break; + case GGML_UNARY_OP_HARDSIGMOID: + { + ggml_compute_forward_hardsigmoid(params, src0, dst); + } break; default: { GGML_ASSERT(false); @@ -13980,6 +14167,9 @@ static void ggml_compute_forward_add_rel_pos_f32( const bool inplace = (bool) ((int32_t *) dst->op_params)[0]; if (!inplace && params->type == GGML_TASK_INIT) { + if (params->ith != 0) { + return; + } memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst)); return; } @@ -16273,8 +16463,9 @@ struct ggml_compute_state_shared { const int n_threads; // synchronization primitives - atomic_int n_active; // num active threads - atomic_int node_n; // active graph node + atomic_int n_active; // num active threads + atomic_int node_n; // active graph node + atomic_int node_task; // active graph node task phase bool (*abort_callback)(void * data); // abort ggml_graph_compute when true void * abort_callback_data; @@ -16330,6 +16521,8 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_ELU: case GGML_UNARY_OP_RELU: + case GGML_UNARY_OP_HARDSWISH: // to opt for multiple threads + case GGML_UNARY_OP_HARDSIGMOID: // to opt for multiple threads { n_tasks = 1; } break; @@ -16520,6 +16713,34 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { return n_tasks; } +static void ggml_graph_compute_thread_sync_node(int * node_n, struct ggml_compute_state * state, const bool do_yield) { + // wait for other threads to finish + const int last_node_n = * node_n; + + while (true) { + if (do_yield) { + sched_yield(); + } + + * node_n = atomic_load(&state->shared->node_n); + if (* node_n != last_node_n) break; + } +} + +static void ggml_graph_compute_thread_sync_task(int * task_phase, struct ggml_compute_state * state, const bool do_yield) { + // wait for other threads to finish + const int last_task_phase = * task_phase; + + while (true) { + if (do_yield) { + sched_yield(); + } + + * task_phase = atomic_load(&state->shared->node_task); + if (* task_phase != last_task_phase) break; + } +} + static thread_ret_t ggml_graph_compute_thread(void * data) { struct ggml_compute_state * state = (struct ggml_compute_state *) 
data; @@ -16530,7 +16751,8 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { set_numa_thread_affinity(state->ith, n_threads); - int node_n = -1; + int node_n = -1; + int task_phase = GGML_TASK_FINALIZE; while (true) { if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) { @@ -16562,7 +16784,6 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { // distribute new work or execute it direct if 1T while (++node_n < cgraph->n_nodes) { GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, node_n, cgraph->n_nodes); - struct ggml_tensor * node = cgraph->nodes[node_n]; const int n_tasks = ggml_get_n_tasks(node, n_threads); @@ -16571,13 +16792,13 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { params.nth = n_tasks; - /* INIT */ - if (GGML_OP_HAS_INIT[node->op]) { - params.type = GGML_TASK_INIT; - ggml_compute_forward(¶ms, node); - } - if (n_tasks == 1) { + /* INIT */ + if (GGML_OP_HAS_INIT[node->op]) { + params.type = GGML_TASK_INIT; + ggml_compute_forward(¶ms, node); + } + // TODO: maybe push node_n to the atomic but if other threads see n_tasks is 1, // they do something more efficient than spinning (?) params.type = GGML_TASK_COMPUTE; @@ -16598,38 +16819,24 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { } } - atomic_store(&state->shared->n_active, n_threads); - atomic_store(&state->shared->node_n, node_n); + task_phase = GGML_TASK_INIT; + atomic_store(&state->shared->n_active, n_threads); + atomic_store(&state->shared->node_n, node_n); + atomic_store(&state->shared->node_task, task_phase); } else { - // wait for other threads to finish - const int last = node_n; - - const bool do_yield = last < 0 || cgraph->nodes[last]->op == GGML_OP_MUL_MAT; - - while (true) { - // TODO: this sched_yield can have significant impact on the performance - either positive or negative - // depending on the workload and the operating system. - // since it is not clear what is the best approach, it should potentially become user-configurable - // ref: https://github.com/ggerganov/ggml/issues/291 - // UPD: adding the do_yield flag seems to resolve the issue universally - if (do_yield) { - sched_yield(); - } - - node_n = atomic_load(&state->shared->node_n); - if (node_n != last) break; - }; + ggml_graph_compute_thread_sync_node(&node_n, state, false); + ggml_graph_compute_thread_sync_task(&task_phase, state, false); } // check if we should stop if (node_n >= cgraph->n_nodes) break; - /* COMPUTE */ + /* INIT & COMPUTE */ struct ggml_tensor * node = cgraph->nodes[node_n]; const int n_tasks = ggml_get_n_tasks(node, n_threads); struct ggml_compute_params params = { - /*.type =*/ GGML_TASK_COMPUTE, + /*.type =*/ GGML_TASK_INIT, /*.ith =*/ state->ith, /*.nth =*/ n_tasks, /*.wsize =*/ cplan->work_size, @@ -16637,8 +16844,39 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { }; if (state->ith < n_tasks) { + if (GGML_OP_HAS_INIT[node->op]) { + ggml_compute_forward(¶ms, node); + } + } + + if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) { + task_phase = GGML_TASK_COMPUTE; + atomic_store(&state->shared->n_active, n_threads); + atomic_store(&state->shared->node_task, task_phase); + } + else { + // TODO: this sched_yield can have significant impact on the performance - either positive or negative + // depending on the workload and the operating system. 
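The n_active/node_task pair updated here behaves like a reusable phase barrier: the last thread to decrement n_active resets the counter and advances the phase, while everyone else spins (optionally yielding) in ggml_graph_compute_thread_sync_task until the phase changes; this is what now separates INIT from COMPUTE and COMPUTE from FINALIZE. The same idea as a self-contained sketch:

#include <atomic>
#include <thread>

struct phase_barrier {
    std::atomic<int> n_active;
    std::atomic<int> phase{0};
    const int n_threads;

    explicit phase_barrier(int n) : n_active(n), n_threads(n) {}

    void arrive_and_wait(bool do_yield) {
        const int last_phase = phase.load();
        if (n_active.fetch_sub(1) == 1) {      // last one in: advance the phase
            n_active.store(n_threads);
            phase.store(last_phase + 1);
            return;
        }
        while (phase.load() == last_phase) {   // spin until the phase flips
            if (do_yield) std::this_thread::yield();
        }
    }
};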
+ // since it is not clear what is the best approach, it should potentially become user-configurable + // ref: https://github.com/ggerganov/ggml/issues/291 + // UPD: adding the do_yield flag seems to resolve the issue universally + const bool do_yield = node_n < 0 || cgraph->nodes[node_n]->op == GGML_OP_MUL_MAT; + ggml_graph_compute_thread_sync_task(&task_phase, state, do_yield); + } + + if (state->ith < n_tasks) { + params.type = GGML_TASK_COMPUTE; ggml_compute_forward(¶ms, node); } + + if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) { + task_phase = GGML_TASK_FINALIZE; + atomic_store(&state->shared->n_active, n_threads); + atomic_store(&state->shared->node_task, task_phase); + } + else { + ggml_graph_compute_thread_sync_task(&task_phase, state, false); + } } return GGML_EXIT_SUCCESS; @@ -16695,8 +16933,8 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) if (ggml_compute_forward_mul_mat_use_blas(node)) { if (node->src[0]->type != GGML_TYPE_F32) { - // here we need memory just for single 2D matrix from src0 - cur = ggml_type_size(GGML_TYPE_F32)*(node->src[0]->ne[0]*node->src[0]->ne[1]); + // here we need memory for fully dequantized matrix from src0 + cur = ggml_type_size(GGML_TYPE_F32)*ggml_nelements(node->src[0]); } } else #endif @@ -16850,6 +17088,7 @@ int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) { /*.n_threads =*/ n_threads, /*.n_active =*/ n_threads, /*.node_n =*/ -1, + /*.node_task =*/ GGML_TASK_FINALIZE, /*.abort_callback =*/ NULL, /*.abort_callback_data =*/ NULL, }; diff --git a/ggml.h b/ggml.h index 0d2a05339..9f1b99982 100644 --- a/ggml.h +++ b/ggml.h @@ -496,6 +496,8 @@ extern "C" { GGML_UNARY_OP_GELU, GGML_UNARY_OP_GELU_QUICK, GGML_UNARY_OP_SILU, + GGML_UNARY_OP_HARDSWISH, + GGML_UNARY_OP_HARDSIGMOID, GGML_UNARY_OP_COUNT, }; @@ -1039,6 +1041,16 @@ extern "C" { struct ggml_tensor * a, struct ggml_tensor * b); + // hardswish(x) = x * relu6(x + 3) / 6 + GGML_API struct ggml_tensor * ggml_hardswish( + struct ggml_context * ctx, + struct ggml_tensor * a); + + // hardsigmoid(x) = relu6(x + 3) / 6 + GGML_API struct ggml_tensor * ggml_hardsigmoid( + struct ggml_context * ctx, + struct ggml_tensor * a); + // normalize along rows GGML_API struct ggml_tensor * ggml_norm( struct ggml_context * ctx, @@ -1490,6 +1502,18 @@ extern "C" { int d1, bool is_2D); + GGML_API struct ggml_tensor * ggml_conv_depthwise_2d( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + struct ggml_tensor * c, + int s0, + int s1, + int p0, + int p1, + int d0, + int d1); + GGML_API struct ggml_tensor * ggml_conv_1d( struct ggml_context * ctx, struct ggml_tensor * a, diff --git a/llama.cpp b/llama.cpp index c04abb9b2..a17c4407f 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1330,8 +1330,10 @@ static llama_state g_state; // available llama models enum e_model { MODEL_UNKNOWN, + MODEL_0_5B, MODEL_1B, MODEL_3B, + MODEL_4B, MODEL_7B, MODEL_8B, MODEL_13B, @@ -2670,6 +2672,7 @@ static std::string llama_model_ftype_name(llama_ftype ftype) { case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K"; case LLAMA_FTYPE_MOSTLY_IQ2_XXS:return "IQ2_XSS - 2.0625 bpw"; case LLAMA_FTYPE_MOSTLY_IQ2_XS: return "IQ2_XS - 2.3125 bpw"; + case LLAMA_FTYPE_MOSTLY_Q3_K_XS:return "Q3_K - Extra small"; default: return "unknown, may not work"; } @@ -2885,6 +2888,7 @@ static void llm_load_hparams( ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); switch (hparams.n_layer) { + case 24: 
model.type = e_model::MODEL_1B; break; case 32: model.type = e_model::MODEL_3B; break; default: model.type = e_model::MODEL_UNKNOWN; } @@ -2903,9 +2907,9 @@ static void llm_load_hparams( { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 24: model.type = e_model::MODEL_1B; break; + case 24: model.type = hparams.n_embd == 1024 ? e_model::MODEL_0_5B : e_model::MODEL_1B; break; case 32: model.type = e_model::MODEL_7B; break; - case 40: model.type = e_model::MODEL_13B; break; + case 40: model.type = hparams.n_head == 20 ? e_model::MODEL_4B : e_model::MODEL_13B; break; case 80: model.type = e_model::MODEL_70B; break; default: model.type = e_model::MODEL_UNKNOWN; } @@ -3727,6 +3731,11 @@ static bool llm_load_tensors( layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}); layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); + // optional bias tensors, present in Stable LM 2 1.6B + layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, false); + layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, false); + layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, false); + layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}); layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}); @@ -5625,12 +5634,24 @@ struct llm_build_context { // compute Q and K and RoPE them struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur); cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur); cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur); cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } Qcur = ggml_rope_custom( ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, @@ -9100,9 +9121,13 @@ struct quantize_state_internal { const llama_model_quantize_params * params; int n_attention_wv = 0; - int n_feed_forward_w2 = 0; + int n_ffn_down = 0; + int n_ffn_gate = 0; + int n_ffn_up = 0; int i_attention_wv = 0; - int i_feed_forward_w2 = 0; + int i_ffn_down = 0; + int i_ffn_gate = 0; + int i_ffn_up = 0; int n_k_quantized = 0; int n_fallback = 0; @@ -9205,8 +9230,8 @@ static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_ty ++qs.i_attention_wv; } else if (name.find("ffn_down") != std::string::npos) { - if (qs.i_feed_forward_w2 < qs.n_feed_forward_w2/8) new_type = GGML_TYPE_Q2_K; - ++qs.i_feed_forward_w2; + if (qs.i_ffn_down < qs.n_ffn_down/8) new_type = GGML_TYPE_Q2_K; + ++qs.i_ffn_down; } else if (name == "token_embd.weight") new_type = GGML_TYPE_Q2_K; } else if (name.find("attn_v.weight") != std::string::npos) { @@ -9243,18 +9268,21 @@ static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_ty // TODO: explore better strategies new_type = GGML_TYPE_Q8_0; } + else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_XS) { + new_type = GGML_TYPE_Q2_K; + } } else if (name.find("ffn_down") != std::string::npos) { const int n_expert = std::max(1, 
(int)qs.model.hparams.n_expert); int i_layer, n_layer; if (n_expert == 1) { - i_layer = qs.i_feed_forward_w2; - n_layer = qs.n_feed_forward_w2; + i_layer = qs.i_ffn_down; + n_layer = qs.n_ffn_down; } else { // Believe it or not, "experts" in the FFN of Mixtral-8x7B are not consecutive, but occasionally randomly - // sprinkled in the model. Hence, simply dividing i_feed_forward_w2 by n_expert does not work + // sprinkled in the model. Hence, simply dividing i_ffn_down by n_expert does not work // for getting the current layer as I initially thought, and we need to resort to parsing the // tensor name. - n_layer = qs.n_feed_forward_w2 / n_expert; + n_layer = qs.n_ffn_down / n_expert; if (sscanf(name.c_str(), "blk.%d.ffn_down", &i_layer) != 1) { throw std::runtime_error(format("Failed to determine layer for tensor %s", name.c_str())); } @@ -9263,7 +9291,7 @@ static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_ty } } if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K; - else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S) { + else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_XS) { if (i_layer < n_layer/8) new_type = GGML_TYPE_Q4_K; } else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) { @@ -9293,11 +9321,12 @@ static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_ty // same quantization as before imatrix stuff, and b) Q4_1/Q5_1 do go crazy on ffn_down without an imatrix. new_type = ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ? GGML_TYPE_Q4_1 : GGML_TYPE_Q5_1; } - ++qs.i_feed_forward_w2; + ++qs.i_ffn_down; } else if (name.find("attn_output.weight") != std::string::npos) { if (arch != LLM_ARCH_FALCON) { if (qs.model.hparams.n_expert == 8) { - if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || + if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_XS || + ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) { new_type = GGML_TYPE_Q5_K; } @@ -9315,6 +9344,20 @@ static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_ty else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K; else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K; } + else if (name.find("ffn_gate") != std::string::npos) { + if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_XS && !use_more_bits(qs.i_ffn_gate, qs.n_ffn_gate)) { + new_type = GGML_TYPE_Q2_K; + } + ++qs.i_ffn_gate; + } + else if (name.find("ffn_up") != std::string::npos) { + if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_XS && !use_more_bits(qs.i_ffn_up, qs.n_ffn_up)) { + new_type = GGML_TYPE_Q2_K; + } + ++qs.i_ffn_up; + } + // if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K; + //} // IK: let's remove this, else Q2_K is almost the same as Q3_K_S //else if (name.find("ffn_gate") != std::string::npos || name.find("ffn_up") != std::string::npos) { // if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K; @@ -9369,8 +9412,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s case LLAMA_FTYPE_ALL_F32: quantized_type = GGML_TYPE_F32; break; // K-quants + case LLAMA_FTYPE_MOSTLY_Q2_K_S: case LLAMA_FTYPE_MOSTLY_Q2_K: quantized_type = GGML_TYPE_Q2_K; break; - case LLAMA_FTYPE_MOSTLY_Q2_K_S: quantized_type = GGML_TYPE_Q2_K; break; + case LLAMA_FTYPE_MOSTLY_Q3_K_XS: case LLAMA_FTYPE_MOSTLY_Q3_K_S: case LLAMA_FTYPE_MOSTLY_Q3_K_M: case
LLAMA_FTYPE_MOSTLY_Q3_K_L: quantized_type = GGML_TYPE_Q3_K; break; @@ -9438,12 +9482,18 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s ++qs.n_attention_wv; } else if (name.find("ffn_down") != std::string::npos) { - ++qs.n_feed_forward_w2; + ++qs.n_ffn_down; + } + else if (name.find("ffn_gate") != std::string::npos) { + ++qs.n_ffn_gate; + } + else if (name.find("ffn_up") != std::string::npos) { + ++qs.n_ffn_up; } } - if (qs.n_attention_wv != qs.n_feed_forward_w2 || (uint32_t)qs.n_attention_wv != model.hparams.n_layer) { - LLAMA_LOG_WARN("%s ============ Strange model: n_attention_wv = %d, n_feed_forward_w2 = %d, hparams.n_layer = %d\n", - __func__, qs.n_attention_wv, qs.n_feed_forward_w2, model.hparams.n_layer); + if (qs.n_attention_wv != qs.n_ffn_down || (uint32_t)qs.n_attention_wv != model.hparams.n_layer) { + LLAMA_LOG_WARN("%s ============ Strange model: n_attention_wv = %d, n_ffn_down = %d, hparams.n_layer = %d\n", + __func__, qs.n_attention_wv, qs.n_ffn_down, model.hparams.n_layer); } size_t total_size_org = 0; diff --git a/llama.h b/llama.h index be1629a0e..04756bfcd 100644 --- a/llama.h +++ b/llama.h @@ -108,6 +108,7 @@ extern "C" { LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ2_XS = 20, // except 1d tensors LLAMA_FTYPE_MOSTLY_Q2_K_S = 21, // except 1d tensors + LLAMA_FTYPE_MOSTLY_Q3_K_XS = 22, // except 1d tensors LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file }; diff --git a/unicode.h b/unicode.h index aeca879ea..844eff3da 100644 --- a/unicode.h +++ b/unicode.h @@ -2,8 +2,9 @@ #include #include -#include +#include #include +#include static const std::vector<std::pair<uint32_t, uint32_t>> digit_ranges = { {0x30, 0x39}, {0xB2, 0xB3}, {0xB9, 0xB9}, {0x660, 0x669}, {0x6F0, 0x6F9}, {0x7C0, 0x7C9}, {0x966, 0x96F}, {0x9E6, 0x9EF}, {0xA66, 0xA6F}, {0xAE6, 0xAEF}, {0xB66, 0xB6F}, {0xBE6, 0xBEF}, {0xC66, 0xC6F},