Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-10 17:14:36 +00:00)
Merge branch 'master' into concedo_experimental

# Conflicts:
#   CMakeLists.txt
#   Makefile
#   README.md
#   ci/run.sh
#   tests/test-tokenizer-0-falcon.cpp
#   tests/test-tokenizer-0-llama.cpp
#   tests/test-tokenizer-1-bpe.cpp
#   tests/test-tokenizer-1-llama.cpp

Commit 8d5e25008f: 60 changed files with 2568 additions and 735 deletions
@@ -342,7 +342,7 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             const auto sampler_names = string_split(argv[i], ';');
-            sparams.samplers_sequence = sampler_types_from_names(sampler_names);
+            sparams.samplers_sequence = sampler_types_from_names(sampler_names, true);
         } else if (arg == "--sampling-seq") {
             if (++i >= argc) {
                 invalid_param = true;

@@ -672,7 +672,15 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
         } else if (arg == "--no-mmap") {
             params.use_mmap = false;
         } else if (arg == "--numa") {
-            params.numa = true;
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            std::string value(argv[i]);
+            /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
+            else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
+            else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
+            else { invalid_param = true; break; }
         } else if (arg == "--verbose-prompt") {
             params.verbose_prompt = true;
         } else if (arg == "--no-display-prompt") {

@@ -936,7 +944,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     printf(" -tb N, --threads-batch N\n");
     printf(" number of threads to use during batch and prompt processing (default: same as --threads)\n");
     printf(" -td N, --threads-draft N");
-    printf(" number of threads to use during generation (default: same as --threads)");
+    printf(" number of threads to use during generation (default: same as --threads)\n");
     printf(" -tbd N, --threads-batch-draft N\n");
     printf(" number of threads to use during batch and prompt processing (default: same as --threads-draft)\n");
     printf(" -p PROMPT, --prompt PROMPT\n");

@@ -957,7 +965,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     printf(" -n N, --n-predict N number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)\n", params.n_predict);
     printf(" -c N, --ctx-size N size of the prompt context (default: %d, 0 = loaded from model)\n", params.n_ctx);
     printf(" -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch);
-    printf(" --samplers samplers that will be used for generation in the order, separated by \';\' (default: %s)\n", sampler_type_names.c_str());
+    printf(" --samplers samplers that will be used for generation in the order, separated by \';\'\n");
+    printf(" (default: %s)\n", sampler_type_names.c_str());
     printf(" --sampling-seq simplified sequence for samplers that will be used (default: %s)\n", sampler_type_chars.c_str());
     printf(" --top-k N top-k sampling (default: %d, 0 = disabled)\n", sparams.top_k);
     printf(" --top-p N top-p sampling (default: %.1f, 1.0 = disabled)\n", (double)sparams.top_p);

@@ -1006,7 +1015,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     printf(" --winogrande-tasks N number of tasks to use when computing the Winogrande score (default: %zu)\n", params.winogrande_tasks);
     printf(" --multiple-choice compute multiple choice score over random tasks from datafile supplied with -f\n");
     printf(" --multiple-choice-tasks N number of tasks to use when computing the multiple choice score (default: %zu)\n", params.winogrande_tasks);
-    printf(" --kl-divergence computes KL-divergence to logits provided via --kl-divergence-base");
+    printf(" --kl-divergence computes KL-divergence to logits provided via --kl-divergence-base\n");
     printf(" --keep N number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
     printf(" --draft N number of tokens to draft for speculative decoding (default: %d)\n", params.n_draft);
     printf(" --chunks N max number of chunks to process (default: %d, -1 = all)\n", params.n_chunks);

@@ -1023,7 +1032,10 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     if (llama_supports_mmap()) {
         printf(" --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
     }
-    printf(" --numa attempt optimizations that help on some NUMA systems\n");
+    printf(" --numa TYPE attempt optimizations that help on some NUMA systems\n");
+    printf(" - distribute: spread execution evenly over all nodes\n");
+    printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n");
+    printf(" - numactl: use the CPU map provided by numactl\n");
     printf(" if run without this previously, it is recommended to drop the system page cache before using this\n");
     printf(" see https://github.com/ggerganov/llama.cpp/issues/1437\n");
     if (llama_supports_gpu_offload()) {

@@ -1123,34 +1135,50 @@ std::vector<std::string> string_split(std::string input, char separator) {
     return parts;
 }

-std::vector<llama_sampler_type> sampler_types_from_names(const std::vector<std::string> & names) {
+std::vector<llama_sampler_type> sampler_types_from_names(const std::vector<std::string> & names, bool allow_alt_names) {
+    std::unordered_map<std::string, llama_sampler_type> sampler_canonical_name_map {
+        {"top_k", llama_sampler_type::TOP_K},
+        {"top_p", llama_sampler_type::TOP_P},
+        {"typical_p", llama_sampler_type::TYPICAL_P},
+        {"min_p", llama_sampler_type::MIN_P},
+        {"tfs_z", llama_sampler_type::TFS_Z},
+        {"temperature", llama_sampler_type::TEMPERATURE}
+    };
+
     // since samplers names are written multiple ways
     // make it ready for both system names and input names
-    std::unordered_map<std::string, llama_sampler_type> sampler_name_map {
-        {"top_k", llama_sampler_type::TOP_K},
+    std::unordered_map<std::string, llama_sampler_type> sampler_alt_name_map {
         {"top-k", llama_sampler_type::TOP_K},
-        {"top_p", llama_sampler_type::TOP_P},
         {"top-p", llama_sampler_type::TOP_P},
         {"nucleus", llama_sampler_type::TOP_P},
-        {"typical_p", llama_sampler_type::TYPICAL_P},
         {"typical-p", llama_sampler_type::TYPICAL_P},
         {"typical", llama_sampler_type::TYPICAL_P},
-        {"min_p", llama_sampler_type::MIN_P},
         {"min-p", llama_sampler_type::MIN_P},
-        {"tfs_z", llama_sampler_type::TFS_Z},
         {"tfs-z", llama_sampler_type::TFS_Z},
         {"tfs", llama_sampler_type::TFS_Z},
-        {"temp", llama_sampler_type::TEMP},
-        {"temperature", llama_sampler_type::TEMP}
+        {"temp", llama_sampler_type::TEMPERATURE}
     };

     std::vector<llama_sampler_type> sampler_types;
     sampler_types.reserve(names.size());
-    for (const auto& name : names) {
-        const auto sampler_item = sampler_name_map.find(name);
-        if (sampler_item != sampler_name_map.end()) {
+    for (const auto & name : names)
+    {
+        auto sampler_item = sampler_canonical_name_map.find(name);
+        if (sampler_item != sampler_canonical_name_map.end())
+        {
             sampler_types.push_back(sampler_item->second);
         }
+        else
+        {
+            if (allow_alt_names)
+            {
+                sampler_item = sampler_alt_name_map.find(name);
+                if (sampler_item != sampler_alt_name_map.end())
+                {
+                    sampler_types.push_back(sampler_item->second);
+                }
+            }
+        }
     }
     return sampler_types;
 }
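For reference, a minimal sketch of how the split into canonical and alternate sampler-name maps behaves after this hunk. It assumes the `llama_sampler_type` enum and the `sampler_types_from_names()` function shown above; the `names` vector is a made-up example, not taken from the commit.

```cpp
// Sketch: resolving user-supplied sampler names (assumes the code in the hunk above).
#include <string>
#include <vector>

std::vector<llama_sampler_type> resolve_example() {
    // Canonical spellings ("top_k", "temperature", ...) always resolve.
    // Alternate spellings ("top-k", "nucleus", "temp", ...) resolve only when
    // allow_alt_names is true, which is how the --samplers argument now calls it;
    // --sampling-seq style single characters go through sampler_types_from_chars instead.
    const std::vector<std::string> names = { "top_k", "nucleus", "temperature" };
    return sampler_types_from_names(names, /*allow_alt_names=*/true);
}
```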
@@ -1162,7 +1190,7 @@ std::vector<llama_sampler_type> sampler_types_from_chars(const std::string & nam
         {'y', llama_sampler_type::TYPICAL_P},
         {'m', llama_sampler_type::MIN_P},
         {'f', llama_sampler_type::TFS_Z},
-        {'t', llama_sampler_type::TEMP}
+        {'t', llama_sampler_type::TEMPERATURE}
     };

     std::vector<llama_sampler_type> sampler_types;

@@ -1183,7 +1211,7 @@ std::string sampler_type_to_name_string(llama_sampler_type sampler_type) {
         case llama_sampler_type::TYPICAL_P: return "typical_p";
         case llama_sampler_type::TOP_P: return "top_p";
         case llama_sampler_type::MIN_P: return "min_p";
-        case llama_sampler_type::TEMP: return "temp";
+        case llama_sampler_type::TEMPERATURE: return "temperature";
         default : return "";
     }
 }

@@ -1690,7 +1718,6 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "no_mmap: %s # default: false\n", !params.use_mmap ? "true" : "false");
     fprintf(stream, "no_mul_mat_q: %s # default: false\n", !params.mul_mat_q ? "true" : "false");
     fprintf(stream, "no_penalize_nl: %s # default: false\n", !sparams.penalize_nl ? "true" : "false");
-    fprintf(stream, "numa: %s # default: false\n", params.numa ? "true" : "false");
     fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type);
     fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride);
     fprintf(stream, "presence_penalty: %f # default: 0.0\n", sparams.penalty_present);
@@ -70,6 +70,7 @@ struct gpt_params {
     float yarn_beta_slow = 1.0f; // YaRN high correction dim
     int32_t yarn_orig_ctx = 0; // YaRN original context length
     int32_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED;
+    ggml_numa_strategy numa = GGML_NUMA_STRATEGY_DISABLED;

     // sampling parameters
     int32_t top_k = 40; // <= 0 to use vocab size

@@ -148,7 +149,6 @@ struct gpt_params {
     bool logits_all = false; // return logits for all tokens in the batch
     bool use_mmap = true; // use mmap for faster loads
     bool use_mlock = false; // use mlock to keep model in memory
-    bool numa = false; // attempt optimizations that help on some NUMA systems
     bool verbose_prompt = false; // print prompt tokens before generation
     bool display_prompt = true; // print prompt before generation
     bool infill = false; // use infill mode

@@ -179,7 +179,7 @@ void process_escapes(std::string& input);
 // String utils
 //

-std::vector<llama_sampler_type> sampler_types_from_names(const std::vector<std::string> & names);
+std::vector<llama_sampler_type> sampler_types_from_names(const std::vector<std::string> & names, bool allow_alt_names);
 std::vector<llama_sampler_type> sampler_types_from_chars(const std::string & names_string);
 std::vector<std::string> string_split(std::string input, char separator);
 std::string sampler_type_to_name_string(llama_sampler_type sampler_type);
@@ -139,7 +139,7 @@ static void sampler_queue(
             case llama_sampler_type::TYPICAL_P: llama_sample_typical (ctx_main, &cur_p, typical_p, min_keep); break;
             case llama_sampler_type::TOP_P : llama_sample_top_p (ctx_main, &cur_p, top_p, min_keep); break;
             case llama_sampler_type::MIN_P : llama_sample_min_p (ctx_main, &cur_p, min_p, min_keep); break;
-            case llama_sampler_type::TEMP:
+            case llama_sampler_type::TEMPERATURE:
                 if (dynatemp_range > 0) {
                     float dynatemp_min = std::max(0.0f, temp - dynatemp_range);
                     float dynatemp_max = std::max(0.0f, temp + dynatemp_range);
@@ -15,7 +15,7 @@ enum class llama_sampler_type : char {
     MIN_P = 'm',
     TFS_Z = 'f',
     TYPICAL_P = 'y',
-    TEMP = 't'
+    TEMPERATURE = 't'
 };

 // sampling parameters

@@ -46,7 +46,7 @@ typedef struct llama_sampling_params {
         llama_sampler_type::TYPICAL_P,
         llama_sampler_type::TOP_P,
         llama_sampler_type::MIN_P,
-        llama_sampler_type::TEMP
+        llama_sampler_type::TEMPERATURE
     };

     std::string grammar; // optional BNF-like grammar to constrain sampling
@@ -10,7 +10,7 @@ import re
 import sys
 from enum import IntEnum
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, ContextManager, Iterator, cast
+from typing import TYPE_CHECKING, Any, ContextManager, Iterator, Sequence, cast

 import numpy as np
 import torch

@@ -25,15 +25,6 @@ import gguf
 from convert import HfVocab


-# check for any of the given keys in the dictionary and return the value of the first key found
-def get_key_opts(d, keys):
-    for k in keys:
-        if k in d:
-            return d[k]
-    print(f"Could not find any of {keys}")
-    sys.exit()
-
-
 ###### MODEL DEFINITIONS ######

 class SentencePieceTokenTypes(IntEnum):

@@ -58,6 +49,15 @@ class Model:
         self.hparams = Model.load_hparams(self.dir_model)
         self.model_arch = self._get_model_architecture()
         self.gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=False)
+        self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer"])
+
+    def find_hparam(self, keys: Sequence[str], optional: bool = False) -> Any:
+        key = next((k for k in keys if k in self.hparams), None)
+        if key is not None:
+            return self.hparams[key]
+        if optional:
+            return None
+        raise KeyError(f"could not find any of: {keys}")

     def set_vocab(self):
         self._set_vocab_gpt2()
@@ -79,28 +79,33 @@ class Model:

     def set_gguf_parameters(self):
         self.gguf_writer.add_name(self.dir_model.name)
-        self.gguf_writer.add_block_count(self.hparams.get(
-            "n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")),
-        ))
-        if (n_ctx := self.hparams.get("max_position_embeddings")) is not None:
+        self.gguf_writer.add_block_count(self.block_count)
+        if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx"], optional=True)) is not None:
             self.gguf_writer.add_context_length(n_ctx)
-        if (n_embd := self.hparams.get("hidden_size")) is not None:
+        n_embd = self.find_hparam(["hidden_size", "n_embd"])
         self.gguf_writer.add_embedding_length(n_embd)
-        if (n_ff := self.hparams.get("intermediate_size")) is not None:
+        if (n_ff := self.find_hparam(["intermediate_size", "n_inner"], optional=True)) is not None:
             self.gguf_writer.add_feed_forward_length(n_ff)
-        if (n_head := self.hparams.get("num_attention_heads")) is not None:
+        n_head = self.find_hparam(["num_attention_heads", "n_head"])
         self.gguf_writer.add_head_count(n_head)

         if (n_head_kv := self.hparams.get("num_key_value_heads")) is not None:
             self.gguf_writer.add_head_count_kv(n_head_kv)

-        if (n_rms_eps := self.hparams.get("rms_norm_eps")) is not None:
-            self.gguf_writer.add_layer_norm_rms_eps(n_rms_eps)
+        if (f_rms_eps := self.hparams.get("rms_norm_eps")) is not None:
+            self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
+        if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon"], optional=True)) is not None:
+            self.gguf_writer.add_layer_norm_eps(f_norm_eps)
         if (n_experts := self.hparams.get("num_local_experts")) is not None:
             self.gguf_writer.add_expert_count(n_experts)
         if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
             self.gguf_writer.add_expert_used_count(n_experts_used)

-        self.gguf_writer.add_parallel_residual(self.hparams.get("use_parallel_residual", True))
+        self.gguf_writer.add_file_type(self.ftype)

     def write_tensors(self):
         block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))

@@ -211,6 +216,8 @@ class Model:
             return MiniCPMModel
         if model_architecture == "BertModel":
             return BertModel
+        if model_architecture == "NomicBertModel":
+            return NomicBertModel
         return Model

     def _is_model_safetensors(self) -> bool:

@@ -268,6 +275,8 @@ class Model:
             return gguf.MODEL_ARCH.MINICPM
         if arch == "BertModel":
             return gguf.MODEL_ARCH.BERT
+        if arch == "NomicBertModel":
+            return gguf.MODEL_ARCH.NOMIC_BERT

         raise NotImplementedError(f'Architecture "{arch}" not supported!')
@@ -1297,21 +1306,21 @@ class GPT2Model(Model):

 class Phi2Model(Model):
     def set_gguf_parameters(self):
-        block_count = get_key_opts(self.hparams, ["num_hidden_layers", "n_layer"])
+        block_count = self.find_hparam(["num_hidden_layers", "n_layer"])

-        rot_pct = get_key_opts(self.hparams, ["partial_rotary_factor"])
-        n_embd = get_key_opts(self.hparams, ["hidden_size", "n_embd"])
-        n_head = get_key_opts(self.hparams, ["num_attention_heads", "n_head"])
+        rot_pct = self.find_hparam(["partial_rotary_factor"])
+        n_embd = self.find_hparam(["hidden_size", "n_embd"])
+        n_head = self.find_hparam(["num_attention_heads", "n_head"])

         self.gguf_writer.add_name("Phi2")
-        self.gguf_writer.add_context_length(get_key_opts(self.hparams, ["n_positions", "max_position_embeddings"]))
+        self.gguf_writer.add_context_length(self.find_hparam(["n_positions", "max_position_embeddings"]))

         self.gguf_writer.add_embedding_length(n_embd)
         self.gguf_writer.add_feed_forward_length(4 * n_embd)
         self.gguf_writer.add_block_count(block_count)
         self.gguf_writer.add_head_count(n_head)
         self.gguf_writer.add_head_count_kv(n_head)
-        self.gguf_writer.add_layer_norm_eps(get_key_opts(self.hparams, ["layer_norm_epsilon", "layer_norm_eps"]))
+        self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_epsilon", "layer_norm_eps"]))
         self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)
         self.gguf_writer.add_file_type(self.ftype)
         self.gguf_writer.add_add_bos_token(False)
@@ -1636,19 +1645,34 @@ in chat mode so that the conversation can end normally.")
 class BertModel(Model):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.block_count = self.hparams["num_hidden_layers"]
+        self.vocab_size = None

     def set_gguf_parameters(self):
-        # TODO(cebtenzzre): merge with parent class
-        self.gguf_writer.add_name(self.dir_model.name)
-        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
-        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
-        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
-        self.gguf_writer.add_block_count(self.block_count)
-        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
-        self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"])
+        super().set_gguf_parameters()
         self.gguf_writer.add_causal_attention(False)
-        self.gguf_writer.add_file_type(self.ftype)
+
+        # get pooling path
+        with open(self.dir_model / "modules.json", encoding="utf-8") as f:
+            modules = json.load(f)
+        pooling_path = None
+        for mod in modules:
+            if mod["type"] == "sentence_transformers.models.Pooling":
+                pooling_path = mod["path"]
+                break
+
+        # get pooling type
+        pooling_type = gguf.PoolingType.NONE
+        if pooling_path is not None:
+            with open(self.dir_model / pooling_path / "config.json", encoding="utf-8") as f:
+                pooling = json.load(f)
+            if pooling["pooling_mode_mean_tokens"]:
+                pooling_type = gguf.PoolingType.MEAN
+            elif pooling["pooling_mode_cls_token"]:
+                pooling_type = gguf.PoolingType.CLS
+            else:
+                raise NotImplementedError("Only MEAN and CLS pooling types supported")
+
+        self.gguf_writer.add_pooling_type(pooling_type.value)

     def set_vocab(self):
         path = self.dir_model

@@ -1658,6 +1682,7 @@ class BertModel(Model):
         vocab = HfVocab(path, added_tokens_path)
         tokens, scores, toktypes = zip(*vocab.all_tokens())
         assert len(tokens) == vocab.vocab_size
+        self.vocab_size = vocab.vocab_size

         # we need this to validate the size of the token_type embeddings
         # though currently we are passing all zeros to the token_type embeddings

@@ -1671,7 +1696,7 @@ class BertModel(Model):
             if tok.startswith(b"##"):
                 return tok[2:]
             return b"\xe2\x96\x81" + tok
-        tokens = [phantom(t, y) for t, y in zip(tokens, toktypes)]
+        tokens = tuple(phantom(t, y) for t, y in zip(tokens, toktypes))

         # set up bos and eos tokens (cls and sep)
         self.gguf_writer.add_bos_token_id(vocab.tokenizer.cls_token_id)
@@ -1723,6 +1748,43 @@ class BertModel(Model):
         self.gguf_writer.add_tensor(new_name, data)


+class NomicBertModel(BertModel):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        # the HF config claims n_ctx=8192, but it uses RoPE scaling
+        self.hparams["n_ctx"] = 2048
+
+        # SwigLU activation
+        assert self.hparams["activation_function"] == "swiglu"
+        # this doesn't do anything in the HF version
+        assert self.hparams["causal"] is False
+        # no bias tensors
+        assert self.hparams["qkv_proj_bias"] is False
+        assert self.hparams["mlp_fc1_bias"] is False
+        assert self.hparams["mlp_fc2_bias"] is False
+        # norm at end of layer
+        assert self.hparams["prenorm"] is False
+        # standard RoPE
+        assert self.hparams["rotary_emb_fraction"] == 1.0
+        assert self.hparams["rotary_emb_interleaved"] is False
+        assert self.hparams["rotary_emb_scale_base"] is None
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
+
+    def get_tensors(self):
+        assert self.vocab_size is not None
+        for name, data in super().get_tensors():
+            # Nomic Embed's token embeddings tensor is padded, but llama.cpp wants tensor sizes to match exactly.
+            if name == 'embeddings.word_embeddings.weight' and data.shape[1] != self.vocab_size:
+                rounded_vocab_size = (self.vocab_size + 63) // 64 * 64
+                assert data.shape == (rounded_vocab_size, self.hparams["n_embd"])
+                data = data[:self.vocab_size, :]
+            yield name, data
+
+
 ###### CONVERSION LOGIC ######
convert.py
@@ -1173,7 +1173,7 @@ def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyM
             for (name, tensor) in model.items()}


-def convert_model_names(model: LazyModel, params: Params) -> LazyModel:
+def convert_model_names(model: LazyModel, params: Params, skip_unknown: bool) -> LazyModel:
     tmap = gguf.TensorNameMap(ARCH, params.n_layer)
     should_skip: set[gguf.MODEL_TENSOR] = set(gguf.MODEL_TENSOR_SKIP.get(ARCH, []))

@@ -1199,7 +1199,11 @@ def convert_model_names(model: LazyModel, params: Params) -> LazyModel:
     for name, lazy_tensor in model.items():
         tensor_type, name_new = tmap.get_type_and_name(name, try_suffixes = (".weight", ".bias")) or (None, None)
         if name_new is None:
-            raise Exception(f"Unexpected tensor name: {name}")
+            if skip_unknown:
+                print(f"Unexpected tensor name: {name} - skipping")
+                continue
+            else:
+                raise Exception(f"Unexpected tensor name: {name}. Use --skip-unknown to ignore it (e.g. LLaVA)")

         if tensor_type in should_skip:
             print(f"skipping tensor {name_new}")

@@ -1390,6 +1394,7 @@ def main(args_in: list[str] | None = None) -> None:
     parser.add_argument("--concurrency", type=int, help=f"concurrency used for conversion (default: {DEFAULT_CONCURRENCY})", default=DEFAULT_CONCURRENCY)
     parser.add_argument("--big-endian", action="store_true", help="model is executed on big endian machine")
     parser.add_argument("--pad-vocab", action="store_true", help="add pad tokens when model vocab expects more than tokenizer metadata provides")
+    parser.add_argument("--skip-unknown", action="store_true", help="skip unknown tensor names instead of failing")

     args = parser.parse_args(args_in)
     if args.awq_path:

@@ -1461,7 +1466,7 @@ def main(args_in: list[str] | None = None) -> None:
     print(f"Special vocab info: {special_vocab}")

     model = model_plus.model
-    model = convert_model_names(model, params)
+    model = convert_model_names(model, params, args.skip_unknown)
     ftype = pick_output_type(model, args.outtype)
     model = convert_to_output_type(model, ftype)
     outfile = args.outfile or default_outfile(model_plus.paths, ftype)
@@ -38,6 +38,7 @@ else()
     add_subdirectory(speculative)
     add_subdirectory(lookahead)
    add_subdirectory(lookup)
+    add_subdirectory(gguf)
     add_subdirectory(train-text-from-scratch)
     add_subdirectory(imatrix)
     if (LLAMA_BUILD_SERVER)
@@ -82,7 +82,8 @@ int main(int argc, char ** argv) {

     // init LLM

-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);

     // initialize the model

@@ -17,7 +17,7 @@ let n_parallel: Int = arguments.count > 3 && Int(arguments[3]) != nil ? Int(argu
 let n_len: Int = 32

 // init LLM
-llama_backend_init(false)
+llama_backend_init()
 defer {
     llama_backend_free()
 }

@@ -50,7 +50,8 @@ int main(int argc, char ** argv) {

     // init LLM

-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);

     // initialize the model

@@ -119,7 +119,8 @@ int main(int argc, char ** argv)
     // Init LLM :
     //---------------------------------

-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);

     llama_model * model;
     llama_context * ctx;
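The example hunks above all make the same API change: NUMA setup is no longer an argument of `llama_backend_init()` but a separate `llama_numa_init()` call that takes the `ggml_numa_strategy` selected with `--numa`. A minimal sketch of the updated startup sequence, assuming the `gpt_params` struct from this commit (the function name is illustrative only):

```cpp
// Sketch of the new initialization order (not a complete program).
#include "common.h"
#include "llama.h"

void init_backend_example(const gpt_params & params) {
    llama_backend_init();          // process-wide backend setup, no arguments anymore
    llama_numa_init(params.numa);  // apply the NUMA strategy chosen via --numa
    // ... load the model and create a context here ...
    llama_backend_free();          // teardown is unchanged
}
```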
@@ -8,6 +8,51 @@
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif

+static std::vector<std::string> split_lines(const std::string & s) {
+    std::string line;
+    std::vector<std::string> lines;
+    std::stringstream ss(s);
+    while (std::getline(ss, line)) {
+        lines.push_back(line);
+    }
+    return lines;
+}
+
+static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, int seq_id) {
+    for (size_t i = 0; i < tokens.size(); i++) {
+        llama_batch_add(batch, tokens[i], i, { seq_id }, false);
+    }
+}
+
+static void normalize(float * vec, float * out, int n) {
+    float norm = 0;
+    for (int i = 0; i < n; i++) {
+        norm += vec[i] * vec[i];
+    }
+    norm = sqrt(norm);
+    for (int i = 0; i < n; i++) {
+        out[i] = vec[i] / norm;
+    }
+}
+
+static void batch_decode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd) {
+    // clear previous kv_cache values (irrelevant for embeddings)
+    llama_kv_cache_clear(ctx);
+
+    // run model
+    fprintf(stderr, "%s: n_tokens = %d, n_seq = %d\n", __func__, batch.n_tokens, n_seq);
+    if (llama_decode(ctx, batch) < 0) {
+        fprintf(stderr, "%s : failed to decode\n", __func__);
+    }
+
+    // normalize on copy
+    for (int k = 0; k < n_seq; k++) {
+        float * emb = llama_get_embeddings_ith(ctx, k);
+        float * out = output + k * n_embd;
+        normalize(emb, out, n_embd);
+    }
+}
+
 int main(int argc, char ** argv) {
     gpt_params params;
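As a quick check of the helper added above, `normalize()` is an ordinary L2 normalization: each component is divided by the square root of the sum of squares, so the output has unit length. A small, self-contained usage sketch (the input values are arbitrary):

```cpp
// Sketch: L2-normalizing a small vector with the normalize() helper above.
#include <cstdio>

int run_normalize_example() {
    float in[3]  = { 3.0f, 0.0f, 4.0f };  // |v| = 5
    float out[3] = { 0.0f, 0.0f, 0.0f };
    normalize(in, out, 3);                // expected output: 0.6, 0.0, 0.8
    printf("%f %f %f\n", out[0], out[1], out[2]);
    return 0;
}
```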
@@ -30,7 +75,8 @@ int main(int argc, char ** argv) {
         params.prompt = gpt_random_prompt(rng);
     }

-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);

     llama_model * model;
     llama_context * ctx;
@@ -56,59 +102,84 @@ int main(int argc, char ** argv) {
         fprintf(stderr, "%s\n", get_system_info(params).c_str());
     }

-    int n_past = 0;
-    // tokenize the prompt
-    auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);
+    // split the prompt into lines
+    std::vector<std::string> prompts = split_lines(params.prompt);
+
+    // max batch size
+    const uint64_t n_batch = params.n_batch;
+    GGML_ASSERT(params.n_batch == params.n_ctx);
+
+    // tokenize the prompts and trim
+    std::vector<std::vector<int32_t>> inputs;
+    for (const auto & prompt : prompts) {
+        auto inp = ::llama_tokenize(ctx, prompt, true);
+        if (inp.size() > n_batch) {
+            inp.resize(n_batch);
+        }
+        inputs.push_back(inp);
+    }
+
+    // tokenization stats
     if (params.verbose_prompt) {
-        fprintf(stderr, "\n");
-        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
-        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
-        for (int i = 0; i < (int) embd_inp.size(); i++) {
-            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
+        for (int i = 0; i < (int) inputs.size(); i++) {
+            fprintf(stderr, "%s: prompt %d: '%s'\n", __func__, i, prompts[i].c_str());
+            fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, inputs[i].size());
+            for (int j = 0; j < (int) inputs[i].size(); j++) {
+                fprintf(stderr, "%6d -> '%s'\n", inputs[i][j], llama_token_to_piece(ctx, inputs[i][j]).c_str());
+            }
+            fprintf(stderr, "\n\n");
         }
-        fprintf(stderr, "\n");
     }

-    if (embd_inp.size() > (size_t)n_ctx) {
-        fprintf(stderr, "%s: error: prompt is longer than the context window (%zu tokens, n_ctx = %d)\n",
-                __func__, embd_inp.size(), n_ctx);
-        return 1;
-    }
-
-    while (!embd_inp.empty()) {
-        int n_tokens = std::min(params.n_batch, (int) embd_inp.size());
-        if (llama_decode(ctx, llama_batch_get_one(embd_inp.data(), n_tokens, n_past, 0))) {
-            fprintf(stderr, "%s : failed to eval\n", __func__);
-            return 1;
-        }
-        n_past += n_tokens;
-        embd_inp.erase(embd_inp.begin(), embd_inp.begin() + n_tokens);
-    }
+    // initialize batch
+    const int n_prompts = prompts.size();
+    struct llama_batch batch = llama_batch_init(n_batch, 0, n_prompts);

+    // allocate output
     const int n_embd = llama_n_embd(model);
-    auto * embeddings = llama_get_embeddings(ctx);
+    std::vector<float> embeddings(n_prompts * n_embd, 0);
+    float * emb = embeddings.data();
+
-    // l2-normalize embeddings
-    float norm = 0;
-    for (int i = 0; i < n_embd; i++) {
-        norm += embeddings[i] * embeddings[i];
-    }
-    norm = sqrt(norm);
-    for (int i = 0; i < n_embd; i++) {
-        embeddings[i] /= norm;
+    // break into batches
+    int p = 0; // number of prompts processed already
+    int s = 0; // number of prompts in current batch
+    for (int k = 0; k < n_prompts; k++) {
+        // clamp to n_batch tokens
+        auto & inp = inputs[k];
+        const uint64_t n_toks = inp.size();
+
+        // encode if at capacity
+        if (batch.n_tokens + n_toks > n_batch) {
+            float * out = emb + p * n_embd;
+            batch_decode(ctx, batch, out, s, n_embd);
+            llama_batch_clear(batch);
+            p += s;
+            s = 0;
         }
+
-    for (int i = 0; i < n_embd; i++) {
-        printf("%f ", embeddings[i]);
+        // add to batch
+        batch_add_seq(batch, inp, s);
+        s += 1;
     }
-    printf("\n");

+    // final batch
+    float * out = emb + p * n_embd;
+    batch_decode(ctx, batch, out, s, n_embd);
+
+    // print first 3 embeddings
+    for (int j = 0; j < std::min(3, n_prompts); j++) {
+        fprintf(stderr, "embedding %d: ", j);
+        for (int i = 0; i < n_embd; i++) {
+            fprintf(stderr, "%f ", emb[j * n_embd + i]);
+        }
+        fprintf(stderr, "\n\n");
+    }
+    fprintf(stderr, "\n");
+
+    // clean up
     llama_print_timings(ctx);
     llama_free(ctx);
     llama_free_model(model);

     llama_backend_free();

     return 0;
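The rewritten `main()` above processes many prompts with one context by packing whole prompts into a `llama_batch` until the next prompt would overflow `n_batch`, flushing with `batch_decode()`, and then continuing. A compact, hedged restatement of that flush-when-full pattern, with the llama.cpp specifics stubbed out (`decode_batch` is a placeholder callback, not part of the commit):

```cpp
// Sketch of the flush-when-full batching pattern used by the embedding example.
#include <cstdint>
#include <functional>
#include <vector>

// inputs: tokenized prompts; n_batch: token capacity of one batch.
// decode_batch(first_prompt, count) stands in for batch_decode() above.
void pack_and_flush(const std::vector<std::vector<int32_t>> & inputs, uint64_t n_batch,
                    const std::function<void(int, int)> & decode_batch) {
    uint64_t n_tokens = 0; // tokens currently packed into the batch
    int p = 0;             // prompts already decoded
    int s = 0;             // prompts in the current batch
    for (int k = 0; k < (int) inputs.size(); k++) {
        const uint64_t n_toks = inputs[k].size();
        if (n_tokens + n_toks > n_batch) { // adding prompt k would overflow: flush first
            decode_batch(p, s);
            p += s;
            s = 0;
            n_tokens = 0;
        }
        n_tokens += n_toks; // prompt k becomes sequence s of the current batch
        s += 1;
    }
    decode_batch(p, s); // final, partially filled batch
}
```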
@@ -80,9 +80,9 @@ The LORA rank can be configured for each model tensor type separately with these
   --rank-wk N LORA rank for wk tensor (default 4)
   --rank-wv N LORA rank for wv tensor (default 4)
   --rank-wo N LORA rank for wo tensor (default 4)
-  --rank-w1 N LORA rank for w1 tensor (default 4)
-  --rank-w2 N LORA rank for w2 tensor (default 4)
-  --rank-w3 N LORA rank for w3 tensor (default 4)
+  --rank-ffn_gate N LORA rank for ffn_gate tensor (default 4)
+  --rank-ffn_down N LORA rank for ffn_down tensor (default 4)
+  --rank-ffn_up N LORA rank for ffn_up tensor (default 4)
 ```

 The LORA rank of 'norm' tensors should always be 1.
@@ -60,9 +60,9 @@ struct my_llama_layer {
     struct ggml_tensor * ffn_norm;

     // ff
-    struct ggml_tensor * w1;
-    struct ggml_tensor * w2;
-    struct ggml_tensor * w3;
+    struct ggml_tensor * ffn_gate; // w1
+    struct ggml_tensor * ffn_down; // w2
+    struct ggml_tensor * ffn_up;   // w3
 };

 struct my_llama_model {

@@ -85,9 +85,9 @@ struct my_llama_lora_hparams {
     uint32_t n_rank_wv = 4;
     uint32_t n_rank_wo = 4;
     uint32_t n_rank_ffn_norm = 1;
-    uint32_t n_rank_w1 = 4;
-    uint32_t n_rank_w2 = 4;
-    uint32_t n_rank_w3 = 4;
+    uint32_t n_rank_ffn_gate = 4;
+    uint32_t n_rank_ffn_down = 4;
+    uint32_t n_rank_ffn_up = 4;
     uint32_t n_rank_tok_embeddings = 4;
     uint32_t n_rank_norm = 1;
     uint32_t n_rank_output = 4;

@@ -117,12 +117,12 @@ struct my_llama_lora_layer {
     struct ggml_tensor * ffn_norm_b;

     // ff
-    struct ggml_tensor * w1_a;
-    struct ggml_tensor * w1_b;
-    struct ggml_tensor * w2_a;
-    struct ggml_tensor * w2_b;
-    struct ggml_tensor * w3_a;
-    struct ggml_tensor * w3_b;
+    struct ggml_tensor * ffn_gate_a;
+    struct ggml_tensor * ffn_gate_b;
+    struct ggml_tensor * ffn_down_a;
+    struct ggml_tensor * ffn_down_b;
+    struct ggml_tensor * ffn_up_a;
+    struct ggml_tensor * ffn_up_b;
 };

 struct my_llama_lora {
@@ -208,9 +208,9 @@ static void print_lora_params(struct my_llama_lora_hparams * params) {
     printf("%s: n_rank_wv : %u\n", __func__, params->n_rank_wv);
     printf("%s: n_rank_wo : %u\n", __func__, params->n_rank_wo);
     printf("%s: n_rank_ffn_norm : %u\n", __func__, params->n_rank_ffn_norm);
-    printf("%s: n_rank_w1 : %u\n", __func__, params->n_rank_w1);
-    printf("%s: n_rank_w2 : %u\n", __func__, params->n_rank_w2);
-    printf("%s: n_rank_w3 : %u\n", __func__, params->n_rank_w3);
+    printf("%s: n_rank_ffn_gate : %u\n", __func__, params->n_rank_ffn_gate);
+    printf("%s: n_rank_ffn_down : %u\n", __func__, params->n_rank_ffn_down);
+    printf("%s: n_rank_ffn_up : %u\n", __func__, params->n_rank_ffn_up);
     printf("%s: n_rank_tok_embeddings : %u\n", __func__, params->n_rank_tok_embeddings);
     printf("%s: n_rank_norm : %u\n", __func__, params->n_rank_norm);
     printf("%s: n_rank_output : %u\n", __func__, params->n_rank_output);

@@ -319,9 +319,9 @@ static void init_model(struct llama_model * input, struct my_llama_model * model
         layer.wv = llama_get_model_tensor(input, tni(LLM_TENSOR_ATTN_V, i));
         layer.wo = llama_get_model_tensor(input, tni(LLM_TENSOR_ATTN_OUT, i));
         layer.ffn_norm = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_NORM, i));
-        layer.w1 = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_GATE, i));
-        layer.w2 = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_DOWN, i));
-        layer.w3 = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_UP, i));
+        layer.ffn_gate = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_GATE, i));
+        layer.ffn_down = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_DOWN, i));
+        layer.ffn_up = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_UP, i));

         assert_shape_1d(layer.attention_norm, hparams.n_embd);
         assert_shape_2d(layer.wq, hparams.n_embd, hparams.n_embd);

@@ -329,9 +329,9 @@ static void init_model(struct llama_model * input, struct my_llama_model * model
         assert_shape_2d(layer.wv, hparams.n_embd, hparams.n_embd_gqa());
         assert_shape_2d(layer.wo, hparams.n_embd, hparams.n_embd);
         assert_shape_1d(layer.ffn_norm, hparams.n_embd);
-        assert_shape_2d(layer.w1, hparams.n_embd, hparams.n_ff);
-        assert_shape_2d(layer.w2, hparams.n_ff, hparams.n_embd);
-        assert_shape_2d(layer.w3, hparams.n_embd, hparams.n_ff);
+        assert_shape_2d(layer.ffn_gate, hparams.n_embd, hparams.n_ff);
+        assert_shape_2d(layer.ffn_down, hparams.n_ff, hparams.n_embd);
+        assert_shape_2d(layer.ffn_up, hparams.n_embd, hparams.n_ff);
     }
 }
@@ -362,12 +362,12 @@ static void set_param_lora(struct my_llama_lora * lora) {
         ggml_set_param(ctx, layer.wo_b);
         ggml_set_param(ctx, layer.ffn_norm_a);
         ggml_set_param(ctx, layer.ffn_norm_b);
-        ggml_set_param(ctx, layer.w1_a);
-        ggml_set_param(ctx, layer.w1_b);
-        ggml_set_param(ctx, layer.w2_a);
-        ggml_set_param(ctx, layer.w2_b);
-        ggml_set_param(ctx, layer.w3_a);
-        ggml_set_param(ctx, layer.w3_b);
+        ggml_set_param(ctx, layer.ffn_gate_a);
+        ggml_set_param(ctx, layer.ffn_gate_b);
+        ggml_set_param(ctx, layer.ffn_down_a);
+        ggml_set_param(ctx, layer.ffn_down_b);
+        ggml_set_param(ctx, layer.ffn_up_a);
+        ggml_set_param(ctx, layer.ffn_up_b);
     }
 }

@@ -435,12 +435,12 @@ static void init_lora(const struct my_llama_model * model, struct my_llama_lora
         layer.ffn_norm_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_norm, n_embd);
         layer.ffn_norm_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_norm, 1);

-        layer.w1_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_w1, n_embd);
-        layer.w1_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_w1, n_ff);
-        layer.w2_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_w2, n_ff);
-        layer.w2_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_w2, n_embd);
-        layer.w3_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_w3, n_embd);
-        layer.w3_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_w3, n_ff);
+        layer.ffn_gate_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_gate, n_embd);
+        layer.ffn_gate_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_gate, n_ff);
+        layer.ffn_down_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_down, n_ff);
+        layer.ffn_down_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_down, n_embd);
+        layer.ffn_up_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_up, n_embd);
+        layer.ffn_up_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_up, n_ff);

         ggml_set_name(layer.attention_norm_a, tni(LLM_TENSOR_ATTN_NORM, ".weight.lora_a", i));
         ggml_set_name(layer.attention_norm_b, tni(LLM_TENSOR_ATTN_NORM, ".weight.lora_b", i));

@@ -454,12 +454,12 @@ static void init_lora(const struct my_llama_model * model, struct my_llama_lora
         ggml_set_name(layer.wo_b, tni(LLM_TENSOR_ATTN_OUT, ".weight.lora_b", i));
         ggml_set_name(layer.ffn_norm_a, tni(LLM_TENSOR_FFN_NORM, ".weight.lora_a", i));
         ggml_set_name(layer.ffn_norm_b, tni(LLM_TENSOR_FFN_NORM, ".weight.lora_b", i));
-        ggml_set_name(layer.w1_a, tni(LLM_TENSOR_FFN_GATE, ".weight.lora_a", i));
-        ggml_set_name(layer.w1_b, tni(LLM_TENSOR_FFN_GATE, ".weight.lora_b", i));
-        ggml_set_name(layer.w2_a, tni(LLM_TENSOR_FFN_DOWN, ".weight.lora_a", i));
-        ggml_set_name(layer.w2_b, tni(LLM_TENSOR_FFN_DOWN, ".weight.lora_b", i));
-        ggml_set_name(layer.w3_a, tni(LLM_TENSOR_FFN_UP, ".weight.lora_a", i));
-        ggml_set_name(layer.w3_b, tni(LLM_TENSOR_FFN_UP, ".weight.lora_b", i));
+        ggml_set_name(layer.ffn_gate_a, tni(LLM_TENSOR_FFN_GATE, ".weight.lora_a", i));
+        ggml_set_name(layer.ffn_gate_b, tni(LLM_TENSOR_FFN_GATE, ".weight.lora_b", i));
+        ggml_set_name(layer.ffn_down_a, tni(LLM_TENSOR_FFN_DOWN, ".weight.lora_a", i));
+        ggml_set_name(layer.ffn_down_b, tni(LLM_TENSOR_FFN_DOWN, ".weight.lora_b", i));
+        ggml_set_name(layer.ffn_up_a, tni(LLM_TENSOR_FFN_UP, ".weight.lora_a", i));
+        ggml_set_name(layer.ffn_up_b, tni(LLM_TENSOR_FFN_UP, ".weight.lora_b", i));
     }

     set_param_lora(lora);
@@ -497,12 +497,12 @@ static void randomize_lora(struct my_llama_lora * lora, int seed, float mean, fl
         randomize_tensor_normal(layer.ffn_norm_a, rnd);
         ggml_set_zero(layer.ffn_norm_b);

-        randomize_tensor_normal(layer.w1_a, rnd);
-        ggml_set_zero(layer.w1_b);
-        randomize_tensor_normal(layer.w2_a, rnd);
-        ggml_set_zero(layer.w2_b);
-        randomize_tensor_normal(layer.w3_a, rnd);
-        ggml_set_zero(layer.w3_b);
+        randomize_tensor_normal(layer.ffn_gate_a, rnd);
+        ggml_set_zero(layer.ffn_gate_b);
+        randomize_tensor_normal(layer.ffn_down_a, rnd);
+        ggml_set_zero(layer.ffn_down_b);
+        randomize_tensor_normal(layer.ffn_up_a, rnd);
+        ggml_set_zero(layer.ffn_up_b);
     }

     free_random_normal_distribution(rnd);

@@ -614,9 +614,9 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
         struct ggml_tensor * wk = add_to_f32(ctx, layer.wk, ggml_mul_mat(ctx, llayer.wk_a, llayer.wk_b));
         struct ggml_tensor * wv = add_to_f32(ctx, layer.wv, ggml_mul_mat(ctx, llayer.wv_a, llayer.wv_b));
         struct ggml_tensor * wo = add_to_f32(ctx, layer.wo, ggml_mul_mat(ctx, llayer.wo_a, llayer.wo_b));
-        struct ggml_tensor * w1 = add_to_f32(ctx, layer.w1, ggml_mul_mat(ctx, llayer.w1_a, llayer.w1_b));
-        struct ggml_tensor * w2 = add_to_f32(ctx, layer.w2, ggml_mul_mat(ctx, llayer.w2_a, llayer.w2_b));
-        struct ggml_tensor * w3 = add_to_f32(ctx, layer.w3, ggml_mul_mat(ctx, llayer.w3_a, llayer.w3_b));
+        struct ggml_tensor * ffn_gate = add_to_f32(ctx, layer.ffn_gate, ggml_mul_mat(ctx, llayer.ffn_gate_a, llayer.ffn_gate_b));
+        struct ggml_tensor * ffn_down = add_to_f32(ctx, layer.ffn_down, ggml_mul_mat(ctx, llayer.ffn_down_a, llayer.ffn_down_b));
+        struct ggml_tensor * ffn_up = add_to_f32(ctx, layer.ffn_up, ggml_mul_mat(ctx, llayer.ffn_up_a, llayer.ffn_up_b));

         struct ggml_tensor * t02 = ggml_rms_norm (ctx, cur, rms_norm_eps); set_name(t02, "t02"); assert_shape_2d(t02, n_embd, N*n_batch);
         struct ggml_tensor * t03 = ggml_repeat (ctx, attention_norm, t02); set_name(t03, "t03"); assert_shape_2d(t03, n_embd, N*n_batch);

@@ -659,11 +659,11 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
         struct ggml_tensor * t22 = ggml_rms_norm (ctx, t21, rms_norm_eps); set_name(t22, "t22"); assert_shape_2d(t22, n_embd, N*n_batch);
         struct ggml_tensor * t23 = ggml_repeat (ctx, ffn_norm, t22); set_name(t23, "t23"); assert_shape_2d(t23, n_embd, N*n_batch);
         struct ggml_tensor * t24 = ggml_mul (ctx, t23, t22); set_name(t24, "t24"); assert_shape_2d(t24, n_embd, N*n_batch);
-        struct ggml_tensor * t25 = ggml_mul_mat (ctx, w3, t24); set_name(t25, "t25"); assert_shape_2d(t25, n_ff, N*n_batch);
-        struct ggml_tensor * t26 = ggml_mul_mat (ctx, w1, t24); set_name(t26, "t26"); assert_shape_2d(t26, n_ff, N*n_batch);
+        struct ggml_tensor * t25 = ggml_mul_mat (ctx, ffn_up, t24); set_name(t25, "t25"); assert_shape_2d(t25, n_ff, N*n_batch);
+        struct ggml_tensor * t26 = ggml_mul_mat (ctx, ffn_gate, t24); set_name(t26, "t26"); assert_shape_2d(t26, n_ff, N*n_batch);
         struct ggml_tensor * t27 = ggml_silu (ctx, t26); set_name(t27, "t27"); assert_shape_2d(t27, n_ff, N*n_batch);
|
||||||
struct ggml_tensor * t28 = ggml_mul (ctx, t27, t25); set_name(t28, "t28"); assert_shape_2d(t28, n_ff, N*n_batch);
|
struct ggml_tensor * t28 = ggml_mul (ctx, t27, t25); set_name(t28, "t28"); assert_shape_2d(t28, n_ff, N*n_batch);
|
||||||
struct ggml_tensor * t29 = ggml_mul_mat (ctx, w2, t28); set_name(t29, "t29"); assert_shape_2d(t29, n_embd, N*n_batch);
|
struct ggml_tensor * t29 = ggml_mul_mat (ctx, ffn_down, t28); set_name(t29, "t29"); assert_shape_2d(t29, n_embd, N*n_batch);
|
||||||
struct ggml_tensor * t30 = ggml_add (ctx, t29, t21); set_name(t30, "t30"); assert_shape_2d(t30, n_embd, N*n_batch);
|
struct ggml_tensor * t30 = ggml_add (ctx, t29, t21); set_name(t30, "t30"); assert_shape_2d(t30, n_embd, N*n_batch);
|
||||||
cur = t30;
|
cur = t30;
|
||||||
if (enable_checkpointing) {
|
if (enable_checkpointing) {
|
||||||
|
@ -723,9 +723,9 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
|
||||||
ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wk, 1.0f));
|
ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wk, 1.0f));
|
||||||
ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wv, 1.0f));
|
ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wv, 1.0f));
|
||||||
ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wo, 1.0f));
|
ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wo, 1.0f));
|
||||||
ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w1, 1.0f));
|
ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.ffn_gate, 1.0f));
|
||||||
ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w2, 1.0f));
|
ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.ffn_down, 1.0f));
|
||||||
ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w3, 1.0f));
|
ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.ffn_up, 1.0f));
|
||||||
}
|
}
|
||||||
|
|
||||||
// allocating checkpoints in one block to reduce memory fragmentation
|
// allocating checkpoints in one block to reduce memory fragmentation
|
||||||
|
@ -798,9 +798,9 @@ static void load_llama_lora_gguf(struct gguf_context * fctx, struct ggml_context
|
||||||
GGUF_GET_KEY(fctx, lora->hparams.n_rank_wv, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_ATTN_V);
|
GGUF_GET_KEY(fctx, lora->hparams.n_rank_wv, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_ATTN_V);
|
||||||
GGUF_GET_KEY(fctx, lora->hparams.n_rank_wo, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_ATTN_OUT);
|
GGUF_GET_KEY(fctx, lora->hparams.n_rank_wo, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_ATTN_OUT);
|
||||||
GGUF_GET_KEY(fctx, lora->hparams.n_rank_ffn_norm, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_NORM);
|
GGUF_GET_KEY(fctx, lora->hparams.n_rank_ffn_norm, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_NORM);
|
||||||
GGUF_GET_KEY(fctx, lora->hparams.n_rank_w1, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_GATE);
|
GGUF_GET_KEY(fctx, lora->hparams.n_rank_ffn_gate, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_GATE);
|
||||||
GGUF_GET_KEY(fctx, lora->hparams.n_rank_w2, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_DOWN);
|
GGUF_GET_KEY(fctx, lora->hparams.n_rank_ffn_down, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_DOWN);
|
||||||
GGUF_GET_KEY(fctx, lora->hparams.n_rank_w3, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_UP);
|
GGUF_GET_KEY(fctx, lora->hparams.n_rank_ffn_up, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_UP);
|
||||||
|
|
||||||
init_lora(model, lora);
|
init_lora(model, lora);
|
||||||
|
|
||||||
|
@ -825,12 +825,12 @@ static void load_llama_lora_gguf(struct gguf_context * fctx, struct ggml_context
|
||||||
copy_tensor_by_name(layer.wo_b, f_ggml_ctx, ggml_get_name(layer.wo_b));
|
copy_tensor_by_name(layer.wo_b, f_ggml_ctx, ggml_get_name(layer.wo_b));
|
||||||
copy_tensor_by_name(layer.ffn_norm_a, f_ggml_ctx, ggml_get_name(layer.ffn_norm_a));
|
copy_tensor_by_name(layer.ffn_norm_a, f_ggml_ctx, ggml_get_name(layer.ffn_norm_a));
|
||||||
copy_tensor_by_name(layer.ffn_norm_b, f_ggml_ctx, ggml_get_name(layer.ffn_norm_b));
|
copy_tensor_by_name(layer.ffn_norm_b, f_ggml_ctx, ggml_get_name(layer.ffn_norm_b));
|
||||||
copy_tensor_by_name(layer.w1_a, f_ggml_ctx, ggml_get_name(layer.w1_a));
|
copy_tensor_by_name(layer.ffn_gate_a, f_ggml_ctx, ggml_get_name(layer.ffn_gate_a));
|
||||||
copy_tensor_by_name(layer.w1_b, f_ggml_ctx, ggml_get_name(layer.w1_b));
|
copy_tensor_by_name(layer.ffn_gate_b, f_ggml_ctx, ggml_get_name(layer.ffn_gate_b));
|
||||||
copy_tensor_by_name(layer.w2_a, f_ggml_ctx, ggml_get_name(layer.w2_a));
|
copy_tensor_by_name(layer.ffn_down_a, f_ggml_ctx, ggml_get_name(layer.ffn_down_a));
|
||||||
copy_tensor_by_name(layer.w2_b, f_ggml_ctx, ggml_get_name(layer.w2_b));
|
copy_tensor_by_name(layer.ffn_down_b, f_ggml_ctx, ggml_get_name(layer.ffn_down_b));
|
||||||
copy_tensor_by_name(layer.w3_a, f_ggml_ctx, ggml_get_name(layer.w3_a));
|
copy_tensor_by_name(layer.ffn_up_a, f_ggml_ctx, ggml_get_name(layer.ffn_up_a));
|
||||||
copy_tensor_by_name(layer.w3_b, f_ggml_ctx, ggml_get_name(layer.w3_b));
|
copy_tensor_by_name(layer.ffn_up_b, f_ggml_ctx, ggml_get_name(layer.ffn_up_b));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -868,9 +868,9 @@ static void save_llama_lora_gguf(struct gguf_context * fctx, struct my_llama_mod
|
||||||
gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_ATTN_V, lora->hparams.n_rank_wv);
|
gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_ATTN_V, lora->hparams.n_rank_wv);
|
||||||
gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_ATTN_OUT, lora->hparams.n_rank_wo);
|
gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_ATTN_OUT, lora->hparams.n_rank_wo);
|
||||||
gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_NORM, lora->hparams.n_rank_ffn_norm);
|
gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_NORM, lora->hparams.n_rank_ffn_norm);
|
||||||
gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_GATE, lora->hparams.n_rank_w1);
|
gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_GATE, lora->hparams.n_rank_ffn_gate);
|
||||||
gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_DOWN, lora->hparams.n_rank_w2);
|
gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_DOWN, lora->hparams.n_rank_ffn_down);
|
||||||
gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_UP, lora->hparams.n_rank_w3);
|
gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_UP, lora->hparams.n_rank_ffn_up);
|
||||||
|
|
||||||
gguf_add_tensor(fctx, lora->tok_embeddings_a);
|
gguf_add_tensor(fctx, lora->tok_embeddings_a);
|
||||||
gguf_add_tensor(fctx, lora->tok_embeddings_b);
|
gguf_add_tensor(fctx, lora->tok_embeddings_b);
|
||||||
|
@ -894,12 +894,12 @@ static void save_llama_lora_gguf(struct gguf_context * fctx, struct my_llama_mod
|
||||||
gguf_add_tensor(fctx, layer.wo_b);
|
gguf_add_tensor(fctx, layer.wo_b);
|
||||||
gguf_add_tensor(fctx, layer.ffn_norm_a);
|
gguf_add_tensor(fctx, layer.ffn_norm_a);
|
||||||
gguf_add_tensor(fctx, layer.ffn_norm_b);
|
gguf_add_tensor(fctx, layer.ffn_norm_b);
|
||||||
gguf_add_tensor(fctx, layer.w1_a);
|
gguf_add_tensor(fctx, layer.ffn_gate_a);
|
||||||
gguf_add_tensor(fctx, layer.w1_b);
|
gguf_add_tensor(fctx, layer.ffn_gate_b);
|
||||||
gguf_add_tensor(fctx, layer.w2_a);
|
gguf_add_tensor(fctx, layer.ffn_down_a);
|
||||||
gguf_add_tensor(fctx, layer.w2_b);
|
gguf_add_tensor(fctx, layer.ffn_down_b);
|
||||||
gguf_add_tensor(fctx, layer.w3_a);
|
gguf_add_tensor(fctx, layer.ffn_up_a);
|
||||||
gguf_add_tensor(fctx, layer.w3_b);
|
gguf_add_tensor(fctx, layer.ffn_up_b);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1104,12 +1104,12 @@ static void save_as_llama_lora(const char * filename, struct my_llama_lora * lor
|
||||||
write_tensor(&file, layer.wo_b, tni(LLM_TENSOR_ATTN_OUT, i, ".weight.loraB"));
|
write_tensor(&file, layer.wo_b, tni(LLM_TENSOR_ATTN_OUT, i, ".weight.loraB"));
|
||||||
write_tensor(&file, layer.ffn_norm_a, tni(LLM_TENSOR_FFN_NORM, i, ".weight.loraA"));
|
write_tensor(&file, layer.ffn_norm_a, tni(LLM_TENSOR_FFN_NORM, i, ".weight.loraA"));
|
||||||
write_tensor(&file, layer.ffn_norm_b, tni(LLM_TENSOR_FFN_NORM, i, ".weight.loraB"));
|
write_tensor(&file, layer.ffn_norm_b, tni(LLM_TENSOR_FFN_NORM, i, ".weight.loraB"));
|
||||||
write_tensor(&file, layer.w1_a, tni(LLM_TENSOR_FFN_GATE, i, ".weight.loraA"));
|
write_tensor(&file, layer.ffn_gate_a, tni(LLM_TENSOR_FFN_GATE, i, ".weight.loraA"));
|
||||||
write_tensor(&file, layer.w1_b, tni(LLM_TENSOR_FFN_GATE, i, ".weight.loraB"));
|
write_tensor(&file, layer.ffn_gate_b, tni(LLM_TENSOR_FFN_GATE, i, ".weight.loraB"));
|
||||||
write_tensor(&file, layer.w2_a, tni(LLM_TENSOR_FFN_DOWN, i, ".weight.loraA"));
|
write_tensor(&file, layer.ffn_down_a, tni(LLM_TENSOR_FFN_DOWN, i, ".weight.loraA"));
|
||||||
write_tensor(&file, layer.w2_b, tni(LLM_TENSOR_FFN_DOWN, i, ".weight.loraB"));
|
write_tensor(&file, layer.ffn_down_b, tni(LLM_TENSOR_FFN_DOWN, i, ".weight.loraB"));
|
||||||
write_tensor(&file, layer.w3_a, tni(LLM_TENSOR_FFN_UP, i, ".weight.loraA"));
|
write_tensor(&file, layer.ffn_up_a, tni(LLM_TENSOR_FFN_UP, i, ".weight.loraA"));
|
||||||
write_tensor(&file, layer.w3_b, tni(LLM_TENSOR_FFN_UP, i, ".weight.loraB"));
|
write_tensor(&file, layer.ffn_up_b, tni(LLM_TENSOR_FFN_UP, i, ".weight.loraB"));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1139,9 +1139,9 @@ struct train_params {
|
||||||
uint32_t n_rank_wv;
|
uint32_t n_rank_wv;
|
||||||
uint32_t n_rank_wo;
|
uint32_t n_rank_wo;
|
||||||
uint32_t n_rank_ffn_norm;
|
uint32_t n_rank_ffn_norm;
|
||||||
uint32_t n_rank_w1;
|
uint32_t n_rank_ffn_gate;
|
||||||
uint32_t n_rank_w2;
|
uint32_t n_rank_ffn_down;
|
||||||
uint32_t n_rank_w3;
|
uint32_t n_rank_ffn_up;
|
||||||
uint32_t n_rank_tok_embeddings;
|
uint32_t n_rank_tok_embeddings;
|
||||||
uint32_t n_rank_norm;
|
uint32_t n_rank_norm;
|
||||||
uint32_t n_rank_output;
|
uint32_t n_rank_output;
|
||||||
|
@ -1152,9 +1152,9 @@ struct train_params {
|
||||||
bool custom_n_rank_wv;
|
bool custom_n_rank_wv;
|
||||||
bool custom_n_rank_wo;
|
bool custom_n_rank_wo;
|
||||||
bool custom_n_rank_ffn_norm;
|
bool custom_n_rank_ffn_norm;
|
||||||
bool custom_n_rank_w1;
|
bool custom_n_rank_ffn_gate;
|
||||||
bool custom_n_rank_w2;
|
bool custom_n_rank_ffn_down;
|
||||||
bool custom_n_rank_w3;
|
bool custom_n_rank_ffn_up;
|
||||||
bool custom_n_rank_tok_embeddings;
|
bool custom_n_rank_tok_embeddings;
|
||||||
bool custom_n_rank_norm;
|
bool custom_n_rank_norm;
|
||||||
bool custom_n_rank_output;
|
bool custom_n_rank_output;
|
||||||
|
@ -1186,9 +1186,9 @@ static struct train_params get_default_train_params() {
|
||||||
params.n_rank_wv = 4;
|
params.n_rank_wv = 4;
|
||||||
params.n_rank_wo = 4;
|
params.n_rank_wo = 4;
|
||||||
params.n_rank_ffn_norm = 1;
|
params.n_rank_ffn_norm = 1;
|
||||||
params.n_rank_w1 = 4;
|
params.n_rank_ffn_gate = 4;
|
||||||
params.n_rank_w2 = 4;
|
params.n_rank_ffn_down = 4;
|
||||||
params.n_rank_w3 = 4;
|
params.n_rank_ffn_up = 4;
|
||||||
params.n_rank_tok_embeddings = 4;
|
params.n_rank_tok_embeddings = 4;
|
||||||
params.n_rank_norm = 1;
|
params.n_rank_norm = 1;
|
||||||
params.n_rank_output = 4;
|
params.n_rank_output = 4;
|
||||||
|
@ -1199,9 +1199,9 @@ static struct train_params get_default_train_params() {
|
||||||
params.custom_n_rank_wv = false;
|
params.custom_n_rank_wv = false;
|
||||||
params.custom_n_rank_wo = false;
|
params.custom_n_rank_wo = false;
|
||||||
params.custom_n_rank_ffn_norm = false;
|
params.custom_n_rank_ffn_norm = false;
|
||||||
params.custom_n_rank_w1 = false;
|
params.custom_n_rank_ffn_gate = false;
|
||||||
params.custom_n_rank_w2 = false;
|
params.custom_n_rank_ffn_down = false;
|
||||||
params.custom_n_rank_w3 = false;
|
params.custom_n_rank_ffn_up = false;
|
||||||
params.custom_n_rank_tok_embeddings = false;
|
params.custom_n_rank_tok_embeddings = false;
|
||||||
params.custom_n_rank_norm = false;
|
params.custom_n_rank_norm = false;
|
||||||
params.custom_n_rank_output = false;
|
params.custom_n_rank_output = false;
|
||||||
|
@ -1232,9 +1232,9 @@ static void train_print_usage(int argc, char ** argv, const struct train_params
|
||||||
fprintf(stderr, " --rank-wk N LORA rank for wk tensor, overrides default rank.\n");
|
fprintf(stderr, " --rank-wk N LORA rank for wk tensor, overrides default rank.\n");
|
||||||
fprintf(stderr, " --rank-wv N LORA rank for wv tensor, overrides default rank.\n");
|
fprintf(stderr, " --rank-wv N LORA rank for wv tensor, overrides default rank.\n");
|
||||||
fprintf(stderr, " --rank-wo N LORA rank for wo tensor, overrides default rank.\n");
|
fprintf(stderr, " --rank-wo N LORA rank for wo tensor, overrides default rank.\n");
|
||||||
fprintf(stderr, " --rank-w1 N LORA rank for w1 tensor, overrides default rank.\n");
|
fprintf(stderr, " --rank-ffn_gate N LORA rank for ffn_gate tensor, overrides default rank.\n");
|
||||||
fprintf(stderr, " --rank-w2 N LORA rank for w2 tensor, overrides default rank.\n");
|
fprintf(stderr, " --rank-ffn_down N LORA rank for ffn_down tensor, overrides default rank.\n");
|
||||||
fprintf(stderr, " --rank-w3 N LORA rank for w3 tensor, overrides default rank.\n");
|
fprintf(stderr, " --rank-ffn_up N LORA rank for ffn_up tensor, overrides default rank.\n");
|
||||||
|
|
||||||
print_common_train_usage(argc, argv, ¶ms->common);
|
print_common_train_usage(argc, argv, ¶ms->common);
|
||||||
}
|
}
|
||||||
|
@ -1369,27 +1369,27 @@ static bool train_params_parse(int argc, char ** argv, struct train_params * par
|
||||||
}
|
}
|
||||||
params->n_rank_wo = std::stoi(argv[i]);
|
params->n_rank_wo = std::stoi(argv[i]);
|
||||||
params->custom_n_rank_wo = true;
|
params->custom_n_rank_wo = true;
|
||||||
} else if (arg == "--rank-w1") {
|
} else if (arg == "--rank-ffn_gate") {
|
||||||
if (++i >= argc) {
|
if (++i >= argc) {
|
||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
params->n_rank_w1 = std::stoi(argv[i]);
|
params->n_rank_ffn_gate = std::stoi(argv[i]);
|
||||||
params->custom_n_rank_w1 = true;
|
params->custom_n_rank_ffn_gate = true;
|
||||||
} else if (arg == "--rank-w2") {
|
} else if (arg == "--rank-ffn_down") {
|
||||||
if (++i >= argc) {
|
if (++i >= argc) {
|
||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
params->n_rank_w2 = std::stoi(argv[i]);
|
params->n_rank_ffn_down = std::stoi(argv[i]);
|
||||||
params->custom_n_rank_w2 = true;
|
params->custom_n_rank_ffn_down = true;
|
||||||
} else if (arg == "--rank-w3") {
|
} else if (arg == "--rank-ffn_up") {
|
||||||
if (++i >= argc) {
|
if (++i >= argc) {
|
||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
params->n_rank_w3 = std::stoi(argv[i]);
|
params->n_rank_ffn_up = std::stoi(argv[i]);
|
||||||
params->custom_n_rank_w3 = true;
|
params->custom_n_rank_ffn_up = true;
|
||||||
} else {
|
} else {
|
||||||
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
|
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
|
||||||
train_print_usage(argc, argv, &default_params);
|
train_print_usage(argc, argv, &default_params);
|
||||||
|
@ -1452,12 +1452,12 @@ static int64_t get_parameter_count(struct my_llama_lora* lora) {
|
||||||
nx += ggml_nelements(layer.wo_b);
|
nx += ggml_nelements(layer.wo_b);
|
||||||
nx += ggml_nelements(layer.ffn_norm_a);
|
nx += ggml_nelements(layer.ffn_norm_a);
|
||||||
nx += ggml_nelements(layer.ffn_norm_b);
|
nx += ggml_nelements(layer.ffn_norm_b);
|
||||||
nx += ggml_nelements(layer.w1_a);
|
nx += ggml_nelements(layer.ffn_gate_a);
|
||||||
nx += ggml_nelements(layer.w1_b);
|
nx += ggml_nelements(layer.ffn_gate_b);
|
||||||
nx += ggml_nelements(layer.w2_a);
|
nx += ggml_nelements(layer.ffn_down_a);
|
||||||
nx += ggml_nelements(layer.w2_b);
|
nx += ggml_nelements(layer.ffn_down_b);
|
||||||
nx += ggml_nelements(layer.w3_a);
|
nx += ggml_nelements(layer.ffn_up_a);
|
||||||
nx += ggml_nelements(layer.w3_b);
|
nx += ggml_nelements(layer.ffn_up_b);
|
||||||
}
|
}
|
||||||
return nx;
|
return nx;
|
||||||
}
|
}
|
||||||
|
@ -1511,9 +1511,9 @@ int main(int argc, char ** argv) {
|
||||||
uint32_t n_rank_wv = params.custom_n_rank_wv ? params.n_rank_wv : params.lora_r;
|
uint32_t n_rank_wv = params.custom_n_rank_wv ? params.n_rank_wv : params.lora_r;
|
||||||
uint32_t n_rank_wo = params.custom_n_rank_wo ? params.n_rank_wo : params.lora_r;
|
uint32_t n_rank_wo = params.custom_n_rank_wo ? params.n_rank_wo : params.lora_r;
|
||||||
uint32_t n_rank_ffn_norm = params.custom_n_rank_ffn_norm ? params.n_rank_ffn_norm : 1;
|
uint32_t n_rank_ffn_norm = params.custom_n_rank_ffn_norm ? params.n_rank_ffn_norm : 1;
|
||||||
uint32_t n_rank_w1 = params.custom_n_rank_w1 ? params.n_rank_w1 : params.lora_r;
|
uint32_t n_rank_ffn_gate = params.custom_n_rank_ffn_gate ? params.n_rank_ffn_gate : params.lora_r;
|
||||||
uint32_t n_rank_w2 = params.custom_n_rank_w2 ? params.n_rank_w2 : params.lora_r;
|
uint32_t n_rank_ffn_down = params.custom_n_rank_ffn_down ? params.n_rank_ffn_down : params.lora_r;
|
||||||
uint32_t n_rank_w3 = params.custom_n_rank_w3 ? params.n_rank_w3 : params.lora_r;
|
uint32_t n_rank_ffn_up = params.custom_n_rank_ffn_up ? params.n_rank_ffn_up : params.lora_r;
|
||||||
uint32_t n_rank_tok_embeddings = params.custom_n_rank_tok_embeddings ? params.n_rank_tok_embeddings : params.lora_r;
|
uint32_t n_rank_tok_embeddings = params.custom_n_rank_tok_embeddings ? params.n_rank_tok_embeddings : params.lora_r;
|
||||||
uint32_t n_rank_norm = params.custom_n_rank_norm ? params.n_rank_norm : 1;
|
uint32_t n_rank_norm = params.custom_n_rank_norm ? params.n_rank_norm : 1;
|
||||||
uint32_t n_rank_output = params.custom_n_rank_output ? params.n_rank_output : params.lora_r;
|
uint32_t n_rank_output = params.custom_n_rank_output ? params.n_rank_output : params.lora_r;
|
||||||
|
@ -1523,9 +1523,9 @@ int main(int argc, char ** argv) {
|
||||||
lora.hparams.n_rank_wv = n_rank_wv;
|
lora.hparams.n_rank_wv = n_rank_wv;
|
||||||
lora.hparams.n_rank_wo = n_rank_wo;
|
lora.hparams.n_rank_wo = n_rank_wo;
|
||||||
lora.hparams.n_rank_ffn_norm = n_rank_ffn_norm;
|
lora.hparams.n_rank_ffn_norm = n_rank_ffn_norm;
|
||||||
lora.hparams.n_rank_w1 = n_rank_w1;
|
lora.hparams.n_rank_ffn_gate = n_rank_ffn_gate;
|
||||||
lora.hparams.n_rank_w2 = n_rank_w2;
|
lora.hparams.n_rank_ffn_down = n_rank_ffn_down;
|
||||||
lora.hparams.n_rank_w3 = n_rank_w3;
|
lora.hparams.n_rank_ffn_up = n_rank_ffn_up;
|
||||||
lora.hparams.n_rank_tok_embeddings = n_rank_tok_embeddings;
|
lora.hparams.n_rank_tok_embeddings = n_rank_tok_embeddings;
|
||||||
lora.hparams.n_rank_norm = n_rank_norm;
|
lora.hparams.n_rank_norm = n_rank_norm;
|
||||||
lora.hparams.n_rank_output = n_rank_output;
|
lora.hparams.n_rank_output = n_rank_output;
|
||||||
|
@ -1566,9 +1566,9 @@ int main(int argc, char ** argv) {
|
||||||
|| (lora.hparams.n_rank_wv != n_rank_wv)
|
|| (lora.hparams.n_rank_wv != n_rank_wv)
|
||||||
|| (lora.hparams.n_rank_wo != n_rank_wo)
|
|| (lora.hparams.n_rank_wo != n_rank_wo)
|
||||||
|| (lora.hparams.n_rank_ffn_norm != n_rank_ffn_norm)
|
|| (lora.hparams.n_rank_ffn_norm != n_rank_ffn_norm)
|
||||||
|| (lora.hparams.n_rank_w1 != n_rank_w1)
|
|| (lora.hparams.n_rank_ffn_gate != n_rank_ffn_gate)
|
||||||
|| (lora.hparams.n_rank_w2 != n_rank_w2)
|
|| (lora.hparams.n_rank_ffn_down != n_rank_ffn_down)
|
||||||
|| (lora.hparams.n_rank_w3 != n_rank_w3)
|
|| (lora.hparams.n_rank_ffn_up != n_rank_ffn_up)
|
||||||
|| (lora.hparams.n_rank_tok_embeddings != n_rank_tok_embeddings)
|
|| (lora.hparams.n_rank_tok_embeddings != n_rank_tok_embeddings)
|
||||||
|| (lora.hparams.n_rank_norm != n_rank_norm)
|
|| (lora.hparams.n_rank_norm != n_rank_norm)
|
||||||
|| (lora.hparams.n_rank_output != n_rank_output)
|
|| (lora.hparams.n_rank_output != n_rank_output)
|
||||||
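The hunks above are a pure rename: the LoRA factor pairs previously called `w1`/`w2`/`w3` now use the `ffn_gate`/`ffn_down`/`ffn_up` names (with matching `--rank-ffn_gate`, `--rank-ffn_down`, `--rank-ffn_up` options). As a reminder of what each per-tensor rank controls, the adapter applied to an FFN weight is the usual low-rank update (general LoRA formulation, not something introduced by this diff):

$$W' = W + B A, \qquad A \in \mathbb{R}^{r \times d_{\text{in}}},\; B \in \mathbb{R}^{d_{\text{out}} \times r},$$

where $r$ is the value passed through the corresponding `--rank-*` flag (or the global `lora_r` default), so the trainable parameter count per weight grows linearly in $r$ rather than with $d_{\text{in}} \cdot d_{\text{out}}$.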
@@ -569,7 +569,8 @@ int main(int argc, char ** argv) {
 params.prompt = gpt_random_prompt(rng);
 }

-llama_backend_init(params.numa);
+llama_backend_init();
+llama_numa_init(params.numa);

 llama_model_params mparams = llama_model_params_from_gpt_params(params);

@@ -203,7 +203,8 @@ int main(int argc, char ** argv) {
 std::mt19937 rng(params.seed);

 LOG("%s: llama backend init\n", __func__);
-llama_backend_init(params.numa);
+llama_backend_init();
+llama_numa_init(params.numa);

 llama_model * model;
 llama_context * ctx;

@@ -1152,8 +1152,7 @@ int main(int argc, char ** argv) {
 if (!params.verbose) {
 llama_log_set(llama_null_log_callback, NULL);
 }
-bool numa = false;
-llama_backend_init(numa);
+llama_backend_init();

 // initialize printer
 std::unique_ptr<printer> p;

@@ -274,8 +274,8 @@ Java_com_example_llama_Llm_new_1batch(JNIEnv *, jobject, jint n_tokens, jint emb

 extern "C"
 JNIEXPORT void JNICALL
-Java_com_example_llama_Llm_backend_1init(JNIEnv *, jobject, jboolean numa) {
+Java_com_example_llama_Llm_backend_1init(JNIEnv *, jobject) {
-llama_backend_init(numa);
+llama_backend_init();
 }

 extern "C"

@@ -51,7 +51,7 @@ actor LlamaContext {
 }

 static func create_context(path: String) throws -> LlamaContext {
-llama_backend_init(false)
+llama_backend_init()
 var model_params = llama_model_default_params()

 #if targetEnvironment(simulator)
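Taken together, these hunks split the old `llama_backend_init(bool numa)` into a plain `llama_backend_init()` plus a separate, optional `llama_numa_init(...)` call. A minimal sketch of the new call order for a host program follows; it assumes the post-change `llama.h`, uses the `GGML_NUMA_STRATEGY_DISTRIBUTE` value that the `--numa distribute` option maps to, and the trailing `llama_backend_free()` is taken from the existing API rather than from these hunks:

```cpp
#include "llama.h"

int main() {
    // global backend state: no NUMA flag here any more
    llama_backend_init();

    // NUMA setup is now its own call, taking a ggml_numa_strategy value
    llama_numa_init(GGML_NUMA_STRATEGY_DISTRIBUTE);

    // ... load the model, create a context, run inference ...

    llama_backend_free();
    return 0;
}
```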
@@ -1,10 +1,12 @@
 # LLaVA

-Currently this implementation supports [llava-v1.5](https://huggingface.co/liuhaotian/llava-v1.5-7b) variants.
+Currently this implementation supports [llava-v1.5](https://huggingface.co/liuhaotian/llava-v1.5-7b) variants,
+as well as llava-1.6 [llava-v1.6](https://huggingface.co/collections/liuhaotian/llava-16-65b9e40155f60fd046a5ccf2) variants.

 The pre-converted [7b](https://huggingface.co/mys/ggml_llava-v1.5-7b)
 and [13b](https://huggingface.co/mys/ggml_llava-v1.5-13b)
 models are available.
+For llava-1.6 a variety of prepared gguf models are available as well [7b-34b](https://huggingface.co/cmp-nct/llava-1.6-gguf)

 After API is confirmed, more models will be supported / uploaded.

@@ -18,10 +20,11 @@ After building, run: `./llava-cli` to see the usage. For example:
 ```

 **note**: A lower temperature like 0.1 is recommended for better quality. add `--temp 0.1` to the command to do so.
+**note**: For GPU offloading ensure to use the `-ngl` flag just like usual

-## Model conversion
+## LLaVA 1.5

-- Clone `llava-v15-7b` and `clip-vit-large-patch14-336` locally:
+- Clone a LLaVA and a CLIP model ([available options](https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md)). For example:

 ```sh
 git clone https://huggingface.co/liuhaotian/llava-v1.5-7b

@@ -55,8 +58,49 @@ python ./convert.py ../llava-v1.5-7b

 Now both the LLaMA part and the image encoder is in the `llava-v1.5-7b` directory.

+## LLaVA 1.6 gguf conversion
+
+1) Backup your pth/safetensor model files as llava-surgery modifies them
+2) Use `python llava-surgery-v2.py -C -m /path/to/hf-model` which also supports llava-1.5 variants pytorch as well as safetensor models:
+- you will find a llava.projector and a llava.clip file in your model directory
+3) Copy the llava.clip file into a subdirectory (like vit), rename it to pytorch_model.bin and add a fitting vit configuration to the directory (https://huggingface.co/cmp-nct/llava-1.6-gguf/blob/main/config_vit.json) and rename it to config.json.
+4) Create the visual gguf model: `python ./examples/llava/convert-image-encoder-to-gguf.py -m ../path/to/vit --llava-projector ../path/to/llava.projector --output-dir ../path/to/output --clip-model-is-vision`
+- This is similar to llava-1.5, the difference is that we tell the encoder that we are working with the pure vision model part of CLIP
+5) Everything else as usual: convert.py the hf model, quantize as needed
+**note** llava-1.6 needs more context than llava-1.5, at least 3000 is needed (just run it at -c 4096)
+**note** llava-1.6 greatly benefits from batched prompt processing (defaults work)
+
+## llava-cli templating and llava-1.6 prompting
+
+llava-1.5 models all use the same vicuna prompt, here you can just add your image question like `-p "Provide a full description."`
+For llava-1.5 models which are not vicuna (mistral and Yi) you need to adapt system prompt as well as user prompt, for this purpose llava-cli has a basic templating system:
+
+**For Mistral and using llava-cli binary:**
+Add this: `-p "<image>\nUSER:\nProvide a full description.\nASSISTANT:\n"`
+The mistral template for llava-1.6 seems to be no system print and a USER/ASSISTANT role
+
+**For the 34B this should work:**
+Add this: `-e -p <|im_start|>system\nAnswer the questions.<|im_end|><|im_start|>user\n<image>\nProvide a full description.<|im_end|><|im_start|>assistant\n`
+
+
+## How to know if you are running in llava-1.5 or llava-1.6 mode
+
+When running llava-cli you will see a visual information right before the prompt is being processed:
+
+**Llava-1.5:**
+`encode_image_with_clip: image embedding created: 576 tokens`
+
+**Llava-1.6 (anything above 576):**
+`encode_image_with_clip: image embedding created: 2880 tokens`
+
+
+Alternatively just pay notice to how many "tokens" have been used for your prompt, it will also show 1000+ tokens for llava-1.6
+
+
 ## TODO

-- [ ] Support non-CPU backend for the image encoding part.
+- [x] Support non-CPU backend for the image encoding part.
 - [ ] Support different sampling methods.
 - [ ] Support more model variants.
@@ -1,7 +1,7 @@
 // NOTE: This is modified from clip.cpp only for LLaVA,
 // so there might be still unnecessary artifacts hanging around
 // I'll gradually clean and extend it
+// Note: Even when using identical normalized image inputs (see normalize_image_u8_to_f32()) we have a significant difference in resulting embeddings compared to pytorch
 #include "clip.h"
 #include "ggml.h"
 #include "ggml-alloc.h"
@@ -30,6 +30,26 @@
 #include <vector>
 #include <sstream>
 #include <cinttypes>
+#include <limits>
+
+//#define CLIP_DEBUG_FUNCTIONS
+
+// RGB uint8 image
+struct clip_image_u8 {
+    int nx;
+    int ny;
+
+    std::vector<uint8_t> buf;
+};
+
+// RGB float32 image (NHWC)
+// Memory layout: RGBRGBRGB...
+struct clip_image_f32 {
+    int nx;
+    int ny;
+
+    std::vector<float> buf;
+};
+
 static std::string format(const char * fmt, ...) {
 va_list ap;
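For orientation, both structs keep the pixels in one flat interleaved RGB buffer; the element holding channel $c$ of pixel $(x, y)$, which is the indexing used by the resize and normalization helpers later in this file, sits at

$$\text{buf}\big[\,3\,(y \cdot n_x + x) + c\,\big], \qquad c \in \{0, 1, 2\}.$$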
@@ -71,6 +91,11 @@ static std::string format(const char * fmt, ...) {
 #define KEY_IMAGE_STD "clip.vision.image_std"
 #define KEY_PROJ_TYPE "clip.projector_type"

+#define KEY_MM_PATCH_MERGE_TYPE "clip.vision.mm_patch_merge_type"
+#define KEY_IMAGE_GRID_PINPOINTS "clip.vision.image_grid_pinpoints"
+#define KEY_IMAGE_CROP_RESOLUTION "clip.vision.image_crop_resolution"
+
 //
 // tensor name constants
 //
@@ -94,6 +119,7 @@ static std::string format(const char * fmt, ...) {
 #define TN_LLAVA_PROJ "mm.%d.%s"
 #define TN_MVLM_PROJ_MLP "mm.model.mlp.%d.%s"
 #define TN_MVLM_PROJ_BLOCK "mm.model.mb_block.%d.block.%d.%s"
+#define TN_IMAGE_NEWLINE "model.image_newline"

 enum projector_type {
@@ -165,7 +191,6 @@ static std::string gguf_data_to_str(enum gguf_type type, const void * data, int
 }
 }

-
 static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
 std::string result;
 for (size_t pos = 0; ; pos += search.length()) {
@@ -217,7 +242,7 @@ static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
 }
 }

-static void print_tensor_info(const ggml_tensor* tensor, const char* prefix = "") {
+static void print_tensor_info(const ggml_tensor * tensor, const char * prefix = "") {
 size_t tensor_size = ggml_nbytes(tensor);
 printf("%s: n_dims = %d, name = %s, tensor_size=%zu, shape:[%" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64 "], type = %s\n",
 prefix, ggml_n_dims(tensor), tensor->name, tensor_size,
@@ -233,31 +258,136 @@ static projector_type clip_projector_type_from_string(const std::string & name)
 return PROJECTOR_TYPE_UNKNOWN;
 }

-//
-// image data
-//
-
-// RGB uint8 image
-struct clip_image_u8 {
-    int nx;
-    int ny;
-
-    std::vector<uint8_t> buf;
-};
-
-// RGB float32 image (NHWC)
-// Memory layout: RGBRGBRGB...
-struct clip_image_f32 {
-    int nx;
-    int ny;
-
-    std::vector<float> buf;
-};
+#ifdef CLIP_DEBUG_FUNCTIONS
+static void clip_image_write_image_to_ppm(const clip_image_u8& img, const std::string& filename) {
+    std::ofstream file(filename, std::ios::binary);
+    if (!file.is_open()) {
+        std::cerr << "Failed to open file for writing: " << filename << std::endl;
+        return;
+    }
+
+    // PPM header: P6 format, width, height, and max color value
+    file << "P6\n" << img.nx << " " << img.ny << "\n255\n";
+
+    // Write pixel data
+    for (size_t i = 0; i < img.buf.size(); i += 3) {
+        // PPM expects binary data in RGB format, which matches our image buffer
+        file.write(reinterpret_cast<const char*>(&img.buf[i]), 3);
+    }
+
+    file.close();
+}
+
+static void clip_image_save_to_bmp(const clip_image_u8& img, const std::string& filename) {
+    std::ofstream file(filename, std::ios::binary);
+    if (!file.is_open()) {
+        std::cerr << "Failed to open file for writing: " << filename << std::endl;
+        return;
+    }
+
+    int fileSize = 54 + 3 * img.nx * img.ny; // File header + info header + pixel data
+    int bytesPerPixel = 3;
+    int widthInBytes = img.nx * bytesPerPixel;
+    int paddingAmount = (4 - (widthInBytes % 4)) % 4;
+    int stride = widthInBytes + paddingAmount;
+
+    // Bitmap file header
+    unsigned char fileHeader[14] = {
+        'B','M', // Signature
+        0,0,0,0, // Image file size in bytes
+        0,0,0,0, // Reserved
+        54,0,0,0 // Start of pixel array
+    };
+
+    // Total file size
+    fileSize = 54 + (stride * img.ny);
+    fileHeader[2] = (unsigned char)(fileSize);
+    fileHeader[3] = (unsigned char)(fileSize >> 8);
+    fileHeader[4] = (unsigned char)(fileSize >> 16);
+    fileHeader[5] = (unsigned char)(fileSize >> 24);
+
+    // Bitmap information header (BITMAPINFOHEADER)
+    unsigned char infoHeader[40] = {
+        40,0,0,0, // Size of this header (40 bytes)
+        0,0,0,0, // Image width
+        0,0,0,0, // Image height
+        1,0, // Number of color planes
+        24,0, // Bits per pixel
+        0,0,0,0, // No compression
+        0,0,0,0, // Image size (can be 0 for no compression)
+        0,0,0,0, // X pixels per meter (not specified)
+        0,0,0,0, // Y pixels per meter (not specified)
+        0,0,0,0, // Total colors (color table not used)
+        0,0,0,0 // Important colors (all are important)
+    };
+
+    // Width and height in the information header
+    infoHeader[4] = (unsigned char)(img.nx);
+    infoHeader[5] = (unsigned char)(img.nx >> 8);
+    infoHeader[6] = (unsigned char)(img.nx >> 16);
+    infoHeader[7] = (unsigned char)(img.nx >> 24);
+    infoHeader[8] = (unsigned char)(img.ny);
+    infoHeader[9] = (unsigned char)(img.ny >> 8);
+    infoHeader[10] = (unsigned char)(img.ny >> 16);
+    infoHeader[11] = (unsigned char)(img.ny >> 24);
+
+    // Write file headers
+    file.write(reinterpret_cast<char*>(fileHeader), sizeof(fileHeader));
+    file.write(reinterpret_cast<char*>(infoHeader), sizeof(infoHeader));
+
+    // Pixel data
+    std::vector<unsigned char> padding(3, 0); // Max padding size to be added to each row
+    for (int y = img.ny - 1; y >= 0; --y) { // BMP files are stored bottom-to-top
+        for (int x = 0; x < img.nx; ++x) {
+            // Each pixel
+            size_t pixelIndex = (y * img.nx + x) * 3;
+            unsigned char pixel[3] = {
+                img.buf[pixelIndex + 2], // BMP stores pixels in BGR format
+                img.buf[pixelIndex + 1],
+                img.buf[pixelIndex]
+            };
+            file.write(reinterpret_cast<char*>(pixel), 3);
+        }
+        // Write padding for the row
+        file.write(reinterpret_cast<char*>(padding.data()), paddingAmount);
+    }
+
+    file.close();
+}
+
+// debug function to convert f32 to u8
+static void clip_image_convert_f32_to_u8(const clip_image_f32& src, clip_image_u8& dst) {
+    dst.nx = src.nx;
+    dst.ny = src.ny;
+    dst.buf.resize(3 * src.nx * src.ny);
+    for (size_t i = 0; i < src.buf.size(); ++i) {
+        dst.buf[i] = static_cast<uint8_t>(std::min(std::max(int(src.buf[i] * 255.0f), 0), 255));
+    }
+}
+#endif

 //
 // clip layers
 //

+struct clip_hparams {
+    int32_t image_size;
+    int32_t patch_size;
+    int32_t hidden_size;
+    int32_t n_intermediate;
+    int32_t projection_dim;
+    int32_t n_head;
+    int32_t n_layer;
+
+    float eps;
+
+    char mm_patch_merge_type[32] = "flat"; // spatial_unpad or flat (default)
+
+    int32_t image_grid_pinpoints[32];
+    int32_t image_crop_resolution;
+};
+
 struct clip_layer {
 // attention
 struct ggml_tensor * k_w;
@@ -287,7 +417,7 @@ struct clip_layer {
 };

 struct clip_vision_model {
-struct clip_vision_hparams hparams;
+struct clip_hparams hparams;

 // embeddings
 struct ggml_tensor * class_embedding;
@@ -310,6 +440,8 @@ struct clip_vision_model {
 struct ggml_tensor * mm_2_w = NULL;
 struct ggml_tensor * mm_2_b = NULL;

+struct ggml_tensor * image_newline = NULL;
+
 // Yi type models with mlp+normalization projection
 struct ggml_tensor * mm_1_w = NULL; // Yi type models have 0, 1, 3, 4
 struct ggml_tensor * mm_1_b = NULL;
@@ -366,6 +498,7 @@ struct clip_ctx {
 // memory buffers to evaluate the model
 ggml_backend_buffer_t params_buffer = NULL;
 ggml_backend_buffer_t compute_buffer = NULL;

 ggml_backend_t backend = NULL;
 ggml_gallocr_t compute_alloc = NULL;
 };
@@ -382,15 +515,16 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
 const int image_size = hparams.image_size;
 const int patch_size = hparams.patch_size;
 const int num_patches = ((image_size / patch_size) * (image_size / patch_size));
+const int num_patches_per_side = image_size / patch_size; GGML_UNUSED(num_patches_per_side);
 const int num_positions = num_patches + 1;
 const int hidden_size = hparams.hidden_size;
 const int n_head = hparams.n_head;
 const int d_head = hidden_size / n_head;
 const int n_layer = hparams.n_layer;
-//const int n_intermediate = hparams.n_intermediate;
-//const int projection_dim = hparams.projection_dim;
 const float eps = hparams.eps;
-int batch_size = imgs->size;
+const int batch_size = imgs->size;

 if (ctx->has_llava_projector) {
 GGML_ASSERT(batch_size == 1);
 }
@@ -540,7 +674,6 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
 embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
-
 embeddings = ggml_gelu(ctx0, embeddings);

 embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
 embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
@@ -791,10 +924,10 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
 if (idx != -1) {
 const std::string proj_type = gguf_get_val_str(ctx, idx);
 new_clip->proj_type = clip_projector_type_from_string(proj_type);
-}
-else {
+} else {
 new_clip->proj_type = PROJECTOR_TYPE_MLP;
 }

 if (new_clip->proj_type == PROJECTOR_TYPE_MLP) {
 if (gguf_find_tensor(ctx, format(TN_LLAVA_PROJ, 3, "weight").c_str()) != -1) {
 new_clip->proj_type = PROJECTOR_TYPE_MLP_NORM;
@@ -920,11 +1053,41 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
 hparams.projection_dim = get_u32(ctx, format(KEY_PROJ_DIM, "vision"));
 hparams.eps = get_f32(ctx, format(KEY_LAYER_NORM_EPS, "vision"));

+    try {
+        int idx = get_key_idx(ctx, KEY_IMAGE_GRID_PINPOINTS);
+        int n = gguf_get_arr_n(ctx, idx);
+        const int32_t * pinpoints = (const int32_t *)gguf_get_arr_data(ctx, idx);
+        for (int i = 0; i < 32 && i < n && pinpoints[i] != 0; ++i) {
+            hparams.image_grid_pinpoints[i] = pinpoints[i];
+        }
+        if (n < 32)
+            hparams.image_grid_pinpoints[n] = 0;
+    } catch (std::runtime_error & e) {
+        hparams.image_grid_pinpoints[0]=0;
+    }
+
+    try {
+        int idx = get_key_idx(ctx, KEY_MM_PATCH_MERGE_TYPE);
+        strcpy(hparams.mm_patch_merge_type, gguf_get_val_str(ctx, idx));
+    } catch (std::runtime_error & e) {
+        strcpy(hparams.mm_patch_merge_type, "flat");
+    }
+
+    try {
+        hparams.image_crop_resolution = get_u32(ctx, KEY_IMAGE_CROP_RESOLUTION); // llava-1.6
+    } catch(const std::exception& e) {
+        hparams.image_crop_resolution = hparams.image_size;
+    }
+
 int idx_mean = get_key_idx(ctx, KEY_IMAGE_MEAN);
 int idx_std = get_key_idx(ctx, KEY_IMAGE_STD);

+    const float * mean_data = (const float *)gguf_get_arr_data(ctx, idx_mean);
+    const float * std_data = (const float *)gguf_get_arr_data(ctx, idx_std);
+
 for (int i = 0; i < 3; ++i) {
-new_clip->image_mean[i] = *((const float *)gguf_get_arr_data(ctx, idx_mean));
+new_clip->image_mean[i] = mean_data[i];
-new_clip->image_std[i] = *((const float *)gguf_get_arr_data(ctx, idx_std));
+new_clip->image_std[i] = std_data[i];
 }

 if (verbosity >= 2) {
@@ -936,13 +1099,27 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
 printf("v_projection_dim %d\n", hparams.projection_dim);
 printf("v_n_head %d\n", hparams.n_head);
 printf("v_n_layer %d\n", hparams.n_layer);
+printf("v_eps %f\n", hparams.eps);
+printf("v_image_mean %f %f %f\n", new_clip->image_mean[0], new_clip->image_mean[1], new_clip->image_mean[2]);
+printf("v_image_std %f %f %f\n", new_clip->image_std[0], new_clip->image_std[1], new_clip->image_std[2]);
+printf("v_image_grid_pinpoints: ");
+for (int i = 0; i < 32 && (hparams.image_grid_pinpoints[i] != 0); ++i) {
+    printf("%d ", hparams.image_grid_pinpoints[i]);
+}
+printf("\n");
+printf("v_mm_patch_merge_type: %s\n", hparams.mm_patch_merge_type);
+
 }

+try {
 vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD);
 vision_model.class_embedding = get_tensor(new_clip->ctx_data, TN_CLASS_EMBD);
 vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v"));
 vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight"));
 vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias"));
+} catch(const std::exception& e) {
+    fprintf(stderr, "%s: failed to load vision model tensors\n", __func__);
+}

 // LLaVA projection
 if (new_clip->proj_type == PROJECTOR_TYPE_MLP || new_clip->proj_type == PROJECTOR_TYPE_MLP_NORM) {
@@ -968,8 +1145,11 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
 vision_model.mm_4_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "weight"));
 vision_model.mm_4_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "bias"));
 } catch (std::runtime_error & e) { }
-}
-else if (new_clip->proj_type == PROJECTOR_TYPE_LDP) {
+try {
+    vision_model.image_newline = get_tensor(new_clip->ctx_data, TN_IMAGE_NEWLINE);
+    // fprintf(stderr, "%s: image_newline tensor (llava-1.6) found\n", __func__);
+} catch (std::runtime_error & e) { }
+} else if (new_clip->proj_type == PROJECTOR_TYPE_LDP) {
 // MobileVLM projection
 vision_model.mm_model_mlp_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "weight"));
 vision_model.mm_model_mlp_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "bias"));
@@ -995,13 +1175,13 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
 vision_model.mm_model_block_2_block_2_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
 vision_model.mm_model_block_2_block_2_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
 vision_model.mm_model_block_2_block_2_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
-}
-else {
+} else {
 std::string proj_type = PROJECTOR_TYPE_NAMES[new_clip->proj_type];
 throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str()));
 }

 vision_model.layers.resize(hparams.n_layer);

 for (int il = 0; il < hparams.n_layer; ++il) {
 auto & layer = vision_model.layers[il];
 layer.k_w = get_tensor(new_clip->ctx_data, format(TN_ATTN_K, "v", il, "weight"));
@@ -1050,8 +1230,20 @@ struct clip_image_f32 * clip_image_f32_init() {
 return new clip_image_f32();
 }

-void clip_image_u8_free (struct clip_image_u8 * img) { delete img; }
+void clip_image_u8_free(struct clip_image_u8 * img) { delete img; }
 void clip_image_f32_free(struct clip_image_f32 * img) { delete img; }
+void clip_image_u8_batch_free(struct clip_image_u8_batch & batch) {
+    if (batch.size > 0) {
+        delete[] batch.data;
+        batch.size = 0;
+    }
+}
+void clip_image_f32_batch_free(struct clip_image_f32_batch & batch) {
+    if (batch.size > 0) {
+        delete[] batch.data;
+        batch.size = 0;
+    }
+}

 static void build_clip_img_from_data(const stbi_uc * data, int nx, int ny, clip_image_u8 * img) {
 img->nx = nx;
@@ -1084,24 +1276,252 @@ bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length
    return true;
}

-// normalize: x = (x - mean) / std
-// TODO: implement bicubic interpolation instead of linear.
-bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, clip_image_f32 * res, const bool pad2square) {
+// Linear interpolation between two points
+inline float lerp(float s, float e, float t) {
+    return s + (e - s) * t;
+}
+// Bilinear resize function
+static void bilinear_resize(const clip_image_u8& src, clip_image_u8& dst, int target_width, int target_height) {
+    dst.nx = target_width;
+    dst.ny = target_height;
+    dst.buf.resize(3 * target_width * target_height);
+
+    float x_ratio = static_cast<float>(src.nx - 1) / target_width;
+    float y_ratio = static_cast<float>(src.ny - 1) / target_height;
+
+    for (int y = 0; y < target_height; y++) {
+        for (int x = 0; x < target_width; x++) {
+            float px = x_ratio * x;
+            float py = y_ratio * y;
+            int x_floor = static_cast<int>(px);
+            int y_floor = static_cast<int>(py);
+            float x_lerp = px - x_floor;
+            float y_lerp = py - y_floor;
+
+            for (int c = 0; c < 3; c++) {
+                float top = lerp(
+                    static_cast<float>(src.buf[3 * (y_floor * src.nx + x_floor) + c]),
+                    static_cast<float>(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]),
+                    x_lerp
+                );
+                float bottom = lerp(
+                    static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]),
+                    static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]),
+                    x_lerp
+                );
+                dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(lerp(top, bottom, y_lerp));
+            }
+        }
+    }
+}
+
+// Normalize image to float32 - careful with pytorch .to(model.device, dtype=torch.float16) - this sometimes reduces precision (32>16>32), sometimes not
+static void normalize_image_u8_to_f32(const clip_image_u8* src, clip_image_f32* dst, const float mean[3], const float std[3]) {
+    dst->nx = src->nx;
+    dst->ny = src->ny;
+    dst->buf.resize(src->buf.size());
+
+    for (size_t i = 0; i < src->buf.size(); ++i) {
+        int c = i % 3; // rgb
+        dst->buf[i] = (static_cast<float>(src->buf[i]) / 255.0f - mean[c]) / std[c];
+    }
+}
+
+inline float clip(float x, float lower, float upper) {
+    return std::max(lower, std::min(x, upper));
+}
+
+static bool bicubic_resize(const clip_image_u8 &img, clip_image_u8 &dst, int target_width, int target_height) {
+    const int nx = img.nx;
+    const int ny = img.ny;
+
+    dst.nx = target_width;
+    dst.ny = target_height;
+    dst.buf.resize(3 * target_width * target_height);
+
+    float Cc;
+    float C[5];
+    float d0, d2, d3, a0, a1, a2, a3;
+    int i, j, k, jj;
+    int x, y;
+    float dx, dy;
+    float tx, ty;
+
+    tx = (float)nx / (float)target_width;
+    ty = (float)ny / (float)target_height;
+
+    // Bicubic interpolation; adapted from ViT.cpp, inspired from :
+    //  -> https://github.com/yglukhov/bicubic-interpolation-image-processing/blob/master/libimage.c#L36
+    //  -> https://en.wikipedia.org/wiki/Bicubic_interpolation
+
+    for (i = 0; i < target_height; i++) {
+        for (j = 0; j < target_width; j++) {
+            x = (int)(tx * j);
+            y = (int)(ty * i);
+
+            dx = tx * j - x;
+            dy = ty * i - y;
+
+            for (k = 0; k < 3; k++) {
+                for (jj = 0; jj <= 3; jj++) {
+                    d0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x - 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
+                    d2 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
+                    d3 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 2, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
+                    a0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
+
+                    a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
+                    a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2;
+                    a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;
+
+                    C[jj] = a0 + a1 * dx + a2 * dx * dx + a3 * dx * dx * dx;
+
+                    d0 = C[0] - C[1];
+                    d2 = C[2] - C[1];
+                    d3 = C[3] - C[1];
+                    a0 = C[1];
+                    a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
+                    a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2;
+                    a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;
+                    Cc = a0 + a1 * dy + a2 * dy * dy + a3 * dy * dy * dy;
+
+                    const uint8_t Cc2 = std::min(std::max(std::round(Cc), 0.0f), 255.0f);
+                    dst.buf[(i * target_width + j) * 3 + k] = float(Cc2);
+                }
+            }
+        }
+    }
+
+    return true;
+}
+
+// llava-1.6 type of resize_and_pad (black)
+static void resize_and_pad_image(const clip_image_u8& image, clip_image_u8 &image_output, const std::pair<int, int>& target_resolution) {
+    int target_width = target_resolution.first;
+    int target_height = target_resolution.second;
+
+    float scale_w = static_cast<float>(target_width) / image.nx;
+    float scale_h = static_cast<float>(target_height) / image.ny;
+
+    int new_width, new_height;
+
+    if (scale_w < scale_h) {
+        new_width = target_width;
+        new_height = std::min(static_cast<int>(std::ceil(image.ny * scale_w)), target_height);
+    } else {
+        new_height = target_height;
+        new_width = std::min(static_cast<int>(std::ceil(image.nx * scale_h)), target_width);
+    }
+
+    clip_image_u8 resized_image;
+    // bilinear_resize(image, resized_image, new_width, new_height);
+    bicubic_resize(image, resized_image, new_width, new_height);
+
+    clip_image_u8 padded_image;
+    padded_image.nx = target_width;
+    padded_image.ny = target_height;
+    padded_image.buf.resize(3 * target_width * target_height, 0); // Initialize with black
+
+    // Calculate padding offsets
+    int pad_x = (target_width - new_width) / 2;
+    int pad_y = (target_height - new_height) / 2;
+
+    // Copy the resized image into the center of the padded buffer
+    for (int y = 0; y < new_height; ++y) {
+        for (int x = 0; x < new_width; ++x) {
+            for (int c = 0; c < 3; ++c) {
+                padded_image.buf[3 * ((y + pad_y) * target_width + (x + pad_x)) + c] = resized_image.buf[3 * (y * new_width + x) + c];
+            }
+        }
+    }
+    image_output = std::move(padded_image);
+}
+
+/**
+ * Selects the best resolution from a list of possible resolutions based on the original size.
+ *
+ * @param original_size The original size of the image in the format (width, height).
+ * @param possible_resolutions A list of possible resolutions in the format [(width1, height1), (width2, height2), ...].
+ * @return The best fit resolution in the format (width, height).
+ */
+static std::pair<int, int> select_best_resolution(const std::pair<int, int> & original_size, const std::vector<std::pair<int, int>> & possible_resolutions) {
+    int original_width = original_size.first;
+    int original_height = original_size.second;
+    std::pair<int, int> best_fit;
+    int max_effective_resolution = 0;
+    int min_wasted_resolution = std::numeric_limits<int>::max();
+
+    for (const auto& resolution : possible_resolutions) {
+        int width = resolution.first;
+        int height = resolution.second;
+        float scale = std::min(static_cast<float>(width) / original_width, static_cast<float>(height) / original_height);
+        int downscaled_width = static_cast<int>(original_width * scale);
+        int downscaled_height = static_cast<int>(original_height * scale);
+        int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height);
+        int wasted_resolution = (width * height) - effective_resolution;
+        // fprintf(stderr, "resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, downscaled_width, downscaled_height, effective_resolution, wasted_resolution);
+        if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) {
+            max_effective_resolution = effective_resolution;
+            min_wasted_resolution = wasted_resolution;
+            best_fit = resolution;
+        }
+    }
+
+    return best_fit;
+}
+
+static std::vector<clip_image_u8*> divide_to_patches_u8(const clip_image_u8 & image, int patch_size) {
+    std::vector<clip_image_u8*> patches;
+    int width = image.nx;
+    int height = image.ny;
+    for (int i = 0; i < height; i += patch_size) {
+        for (int j = 0; j < width; j += patch_size) {
+            clip_image_u8 *patch = clip_image_u8_init();
+            patch->nx = std::min(patch_size, width - j);
+            patch->ny = std::min(patch_size, height - i);
+            patch->buf.resize(3 * patch->nx * patch->ny);
+            for (int y = 0; y < patch->ny; ++y) {
+                for (int x = 0; x < patch->nx; ++x) {
+                    for (int c = 0; c < 3; ++c) {
+                        patch->buf[3 * (y * patch->nx + x) + c] = image.buf[3 * ((i + y) * width + (j + x)) + c];
+                    }
+                }
+            }
+            patches.push_back(patch);
+        }
+    }
+    return patches;
+}
+
+// returns the normalized float tensor for llava-1.5, for spatial_unpad with anyres processing for llava-1.6 it returns the normalized image patch tensors as a vector
+// res_imgs memory is being allocated here, previous allocations will be freed if found
+bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, clip_image_f32_batch & res_imgs) {
+    bool pad_to_square = true;
    if (!ctx->has_vision_encoder) {
        printf("This gguf file seems to have no vision encoder\n");
        return false;
    }
+    auto & params = ctx->vision_model.hparams;
+    // The model config actually contains all we need to decide on how to preprocess, here we automatically switch to the new llava-1.6 preprocessing
+    if (strcmp(params.mm_patch_merge_type, "spatial_unpad") == 0) {
+        pad_to_square = false;
+    }
+    // free the previous res_imgs if any set
+    if (res_imgs.size > 0) {
+        clip_image_f32_batch_free(res_imgs);
+    }
+    res_imgs.data = nullptr;
+    res_imgs.size = 0;
+
    // the logic below is to pad the shorter side to the longer side with a background color: rgb(122, 116, 104)
    // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156

    clip_image_u8 * temp = clip_image_u8_init(); // we will keep the input image data here temporarily
-    if (pad2square && img->nx != img->ny) {
+    if (pad_to_square && img->nx != img->ny) {
        int longer_side = std::max(img->nx, img->ny);
        temp->nx = longer_side;
        temp->ny = longer_side;
        temp->buf.resize(3 * longer_side * longer_side);
-        const uint8_t bc[3] = {122, 116, 104}; // background color in RGB from LLaVA
+        const uint8_t bc[3] = {122, 116, 104}; // background color in RGB from LLaVA (this is the mean rgb color * 255)

        // fill with background color
        for (size_t i = 0; i < temp->buf.size(); i++) {
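As an aside, the grid-selection rule introduced above is easy to sanity-check in isolation. The sketch below reimplements the same scoring ("maximize effective resolution, then minimize wasted pixels") outside of clip.cpp; the candidate list mirrors the typical llava-1.6 `image_grid_pinpoints` and the 800x600 input is only an assumed example, not something read from a model.

```cpp
// Standalone sketch of the select_best_resolution scoring rule shown above.
// The candidate grid resolutions and the 800x600 input are assumed example values.
#include <algorithm>
#include <cstdio>
#include <limits>
#include <utility>
#include <vector>

int main() {
    const std::pair<int, int> original = {800, 600}; // hypothetical input image
    const std::vector<std::pair<int, int>> candidates = {
        {336, 672}, {672, 336}, {672, 672}, {1008, 336}, {336, 1008}};

    std::pair<int, int> best{};
    int best_effective = 0;
    int best_wasted    = std::numeric_limits<int>::max();
    for (const auto & c : candidates) {
        // scale the original so it fits inside the candidate, then score it
        float scale  = std::min((float) c.first / original.first, (float) c.second / original.second);
        int   eff    = std::min((int)(original.first * scale) * (int)(original.second * scale),
                                original.first * original.second);
        int   wasted = c.first * c.second - eff;
        if (eff > best_effective || (eff == best_effective && wasted < best_wasted)) {
            best_effective = eff;
            best_wasted    = wasted;
            best           = c;
        }
    }
    printf("best fit: %d x %d\n", best.first, best.second); // prints 672 x 672 for 800x600
}
```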
@@ -1118,19 +1538,64 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, cli
            temp->buf[j+2] = img->buf[i+2];
        }
    }
+    } else {
+        if (params.image_grid_pinpoints[0] != 0) {
+            // "spatial_unpad" with "anyres" processing for llava-1.6
+            std::vector<std::pair<int, int>> possible_resolutions;
+            for (int i = 0; i < 32 && params.image_grid_pinpoints[i] != 0; i+=2) {
+                possible_resolutions.push_back({params.image_grid_pinpoints[i], params.image_grid_pinpoints[i+1]});
+            }
+            std::pair<int, int> best_resolution = select_best_resolution({img->nx, img->ny}, possible_resolutions);
+            // clip_image_save_to_bmp(*img, "input.bmp");
+            resize_and_pad_image(*img, *temp, best_resolution); // we do not pad with mean-bg color anymore in llava-1.6
+            // clip_image_save_to_bmp(*temp, "resized.bmp");
+            // visually verify normalized image:
+            // normalize_image_u8_to_f32(*temp, *res, ctx->image_mean, ctx->image_std);
+            // {
+            //     clip_image_u8 * temp2 = clip_image_u8_init();
+            //     clip_image_convert_f32_to_u8(*res, *temp2);
+            //     clip_image_save_to_bmp(*temp2, "resized_normalized_f32.bmp");
+            //     clip_image_u8_free(temp2);
+            // }
+
+            std::vector<clip_image_u8 *> patches = divide_to_patches_u8(*temp, params.image_size); // prepare spatial sorted main patches of image_size each (336 in llava-1.6)
+
+            clip_image_u8 *image_original_resize = clip_image_u8_init();
+            // bilinear_resize(*img, *image_original_resize, params.image_size, params.image_size); // in python this is "shortest_edge", but all CLIP are square
+            bicubic_resize(*img, *image_original_resize, params.image_size, params.image_size); // in python this is "shortest_edge", but all CLIP are square
+            patches.insert(patches.begin(), image_original_resize);
+            // clip_image_f32_batch_init(patches.size());
+            res_imgs.size = patches.size();
+            res_imgs.data = new clip_image_f32[res_imgs.size];
+            int num=0;
+            for (auto& patch : patches) {
+                normalize_image_u8_to_f32(patch, &res_imgs.data[num], ctx->image_mean, ctx->image_std);
+                num++;
+            }
+
+            for (size_t i = 0; i < patches.size(); i++) {
+                // printf("patch %d: %d %d\n", i, patches[i]->nx, patches[i]->ny);
+                clip_image_u8_free(patches[i]);
+            }
+
+            clip_image_u8_free(temp);
+
+            return true;
        } else {
            temp->nx = img->nx;
            temp->ny = img->ny;
            temp->buf.resize(img->buf.size());
            memcpy(temp->buf.data(), img->buf.data(), temp->buf.size());
        }
+    }

    const int nx = temp->nx;
    const int ny = temp->ny;
+    // clip_image_save_to_bmp(*temp, "resized_vanilla.bmp");

    const int nx2 = ctx->vision_model.hparams.image_size;
    const int ny2 = ctx->vision_model.hparams.image_size;
+    clip_image_f32 * res = clip_image_f32_init();
    res->nx = nx2;
    res->ny = ny2;
    res->buf.resize(3 * nx2 * ny2);
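A rough worked example of the anyres path above, assuming the selected best resolution comes out at 672x672 and `image_size` is the usual 336: `divide_to_patches_u8` yields a 2x2 grid of 336x336 patches, the bicubic-resized original is inserted at the front, so `res_imgs` ends up holding 5 normalized f32 images. A minimal sketch of that count, with the 672x672/336 figures as assumptions:

```cpp
// Patch count for the anyres path, assuming best_resolution = 672x672 and image_size = 336.
#include <cstdio>

int main() {
    const int image_size = 336;
    const int best_w = 672, best_h = 672;
    const int grid_patches   = (best_w / image_size) * (best_h / image_size); // 2 * 2 = 4
    const int total_res_imgs = grid_patches + 1; // plus the resized original prepended at the front
    printf("res_imgs.size = %d\n", total_res_imgs); // 5
}
```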
@@ -1184,9 +1649,26 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, cli
    }
    clip_image_u8_free(temp);
+
+    // {
+    //    clip_image_u8 * temp2 = clip_image_u8_init();
+    //    clip_image_convert_f32_to_u8(*res, *temp2);
+    //    clip_image_save_to_bmp(*temp2, "resized_normalized_f32_vanilla.bmp");
+    //    clip_image_u8_free(temp2);
+    // }
+    // res_imgs.push_back(res);
+
+    res_imgs.size = 1;
+    res_imgs.data = new clip_image_f32[res_imgs.size];
+    res_imgs.data[0] = *res;
+    clip_image_f32_free(res);

    return true;
}
+
+ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) {
+    return ctx->vision_model.image_newline;
+}

void clip_free(clip_ctx * ctx) {
    ggml_free(ctx->ctx_data);
    gguf_free(ctx->ctx_gguf);
@@ -1194,6 +1676,42 @@ void clip_free(clip_ctx * ctx) {
    delete ctx;
}

+size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
+    return clip_n_patches(ctx) * clip_n_mmproj_embd(ctx) * sizeof(float);
+}
+
+int32_t clip_image_size(const struct clip_ctx * ctx) {
+    return ctx->vision_model.hparams.image_size;
+}
+
+int32_t clip_patch_size(const struct clip_ctx * ctx) {
+    return ctx->vision_model.hparams.patch_size;
+}
+
+int32_t clip_hidden_size(const struct clip_ctx * ctx) {
+    return ctx->vision_model.hparams.hidden_size;
+}
+
+const char * clip_patch_merge_type(const struct clip_ctx * ctx) {
+    return ctx->vision_model.hparams.mm_patch_merge_type;
+}
+
+const int32_t * clip_image_grid(const struct clip_ctx * ctx) {
+    return ctx->vision_model.hparams.image_grid_pinpoints;
+}
+
+int clip_n_patches(const struct clip_ctx * ctx) {
+    const auto & params = ctx->vision_model.hparams;
+
+    int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);
+
+    if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
+        n_patches /= 4;
+    }
+
+    return n_patches;
+}
+
bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
    if (!ctx->has_vision_encoder) {
        printf("This gguf file seems to have no vision encoder\n");
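The new accessors make the embedding buffer math explicit. With the hparams typically used by llava-1.5 (image_size 336, patch_size 14, MLP projector of width 4096 — assumed values here, read from the model at runtime), `clip_n_patches()` is 24*24 = 576 and `clip_embd_nbytes()` is 576 * 4096 * 4 bytes, about 9 MiB per encoded image. A back-of-the-envelope sketch:

```cpp
// Size check for clip_embd_nbytes() under assumed llava-1.5 hparams
// (image_size = 336, patch_size = 14, mm projector output dim = 4096).
#include <cstdio>

int main() {
    const int image_size    = 336;
    const int patch_size    = 14;
    const int n_mmproj_embd = 4096; // corresponds to mm_2_b->ne[0] for the MLP projector
    const int n_patches     = (image_size / patch_size) * (image_size / patch_size); // 24 * 24 = 576
    const size_t nbytes     = (size_t) n_patches * n_mmproj_embd * sizeof(float);
    printf("%d patches, %zu bytes (~%.1f MiB) per image\n",
           n_patches, nbytes, nbytes / (1024.0 * 1024.0));
}
```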
@@ -1213,7 +1731,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
    }

    int batch_size = imgs->size;
-    if(ctx->has_llava_projector) {
+    if (ctx->has_llava_projector) {
        GGML_ASSERT(batch_size == 1); // TODO: support multiple images
    }

@@ -1224,6 +1742,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
    // set inputs
    const auto & model = ctx->vision_model;
    const auto & hparams = model.hparams;
+
    const int image_size = hparams.image_size;
    const int patch_size = hparams.patch_size;
    const int num_patches = ((image_size / patch_size) * (image_size / patch_size));
@@ -1301,11 +1820,11 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima

    // copy the embeddings to the location passed by the user
    ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings));

    return true;
}

bool clip_model_quantize(const char * fname_inp, const char * fname_out, const int itype) {

    ggml_type type = GGML_TYPE_Q4_1;

    assert(itype < GGML_TYPE_COUNT);
@@ -1494,26 +2013,13 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
    if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
        return ctx->vision_model.mm_model_block_1_block_2_1_b->ne[0];
    }
-    else if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
+    if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
        return ctx->vision_model.mm_2_b->ne[0];
-    } else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
+    }
+    if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
        return ctx->vision_model.mm_3_b->ne[0];
    }
-    else {
    std::string proj_type = PROJECTOR_TYPE_NAMES[ctx->proj_type];
    throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str()));
-    }
-}
-
-int clip_n_patches(const struct clip_ctx * ctx) {
-    auto & params = ctx->vision_model.hparams;
-    int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);
-    if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
-        n_patches /= 4;
-    }
-    return n_patches;
-}
-
-size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
-    return clip_n_patches(ctx) * clip_n_mmproj_embd(ctx) * sizeof(float);
-}
}

@@ -24,25 +24,7 @@ struct clip_ctx;
extern "C" {
#endif

-struct clip_vision_hparams {
-    int32_t image_size;
-    int32_t patch_size;
-    int32_t hidden_size;
-    int32_t n_intermediate;
-    int32_t projection_dim;
-    int32_t n_head;
-    int32_t n_layer;
-    float eps;
-};
-
-CLIP_API struct clip_ctx * clip_model_load(const char * fname, int verbosity);
-
-CLIP_API void clip_free(struct clip_ctx * ctx);
-
-CLIP_API size_t clip_embd_nbytes(const struct clip_ctx * ctx);
-
-CLIP_API int clip_n_patches (const struct clip_ctx * ctx);
-CLIP_API int clip_n_mmproj_embd(const struct clip_ctx * ctx);
+struct clip_ctx;

struct clip_image_u8_batch {
    struct clip_image_u8 * data;
@@ -54,18 +36,43 @@ struct clip_image_f32_batch {
    size_t size;
};

+CLIP_API struct clip_ctx * clip_model_load    (const char * fname, int verbosity);
+CLIP_API struct clip_ctx * clip_model_load_cpu(const char * fname, int verbosity);
+
+CLIP_API void clip_free(struct clip_ctx * ctx);
+
+CLIP_API size_t clip_embd_nbytes(const struct clip_ctx * ctx);
+
+CLIP_API int32_t clip_image_size (const struct clip_ctx * ctx);
+CLIP_API int32_t clip_patch_size (const struct clip_ctx * ctx);
+CLIP_API int32_t clip_hidden_size(const struct clip_ctx * ctx);
+
+// TODO: should be enum, not string
+CLIP_API const char * clip_patch_merge_type(const struct clip_ctx * ctx);
+
+CLIP_API const int32_t * clip_image_grid(const struct clip_ctx * ctx);
+
+CLIP_API int clip_n_patches    (const struct clip_ctx * ctx);
+CLIP_API int clip_n_mmproj_embd(const struct clip_ctx * ctx);
+
CLIP_API struct clip_image_u8  * clip_image_u8_init ();
CLIP_API struct clip_image_f32 * clip_image_f32_init();

CLIP_API void clip_image_u8_free (struct clip_image_u8  * img);
CLIP_API void clip_image_f32_free(struct clip_image_f32 * img);
+CLIP_API void clip_image_u8_batch_free (struct clip_image_u8_batch  & batch);
+CLIP_API void clip_image_f32_batch_free(struct clip_image_f32_batch & batch);

CLIP_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img);

/** interpret bytes as an image file with length bytes_length, and use the result to populate img */
CLIP_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img);

-CLIP_API bool clip_image_preprocess (struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32 * res, bool pad2square);
+/** preprocess img and store the result in res_imgs, pad_to_square may be overriden to false depending on model configuration */
+CLIP_API bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, clip_image_f32_batch & res_imgs );
+
+CLIP_API struct ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx);

CLIP_API bool clip_image_encode      (struct clip_ctx * ctx, int n_threads, struct clip_image_f32 * img, float * vec);
CLIP_API bool clip_image_batch_encode(struct clip_ctx * ctx, int n_threads, const struct clip_image_f32_batch * imgs, float * vec);
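Putting the new header surface together, a caller now preprocesses into a `clip_image_f32_batch` and encodes each entry separately. The following is only a minimal usage sketch (error handling trimmed, "mmproj.gguf" and "input.jpg" are placeholder paths, thread count arbitrary), not code from this commit:

```cpp
// Minimal usage sketch of the batch preprocessing API declared above.
#include <cstddef>
#include <cstdlib>
#include "clip.h"

int main() {
    struct clip_ctx * ctx = clip_model_load("mmproj.gguf", /*verbosity=*/ 1); // placeholder path

    struct clip_image_u8 * img = clip_image_u8_init();
    clip_image_load_from_file("input.jpg", img);                              // placeholder path

    struct clip_image_f32_batch batch = { /*.data =*/ nullptr, /*.size =*/ 0 };
    clip_image_preprocess(ctx, img, batch); // 1 entry for llava-1.5, N entries for llava-1.6 anyres

    // one embedding slab per preprocessed image
    float * embd = (float *) malloc(clip_embd_nbytes(ctx) * batch.size);
    for (size_t i = 0; i < batch.size; i++) {
        clip_image_encode(ctx, /*n_threads=*/ 4, &batch.data[i],
                          embd + i * clip_n_patches(ctx) * clip_n_mmproj_embd(ctx));
    }

    free(embd);
    clip_image_f32_batch_free(batch);
    clip_image_u8_free(img);
    clip_free(ctx);
}
```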
@@ -78,18 +78,19 @@ ap.add_argument("--text-only", action="store_true", required=False,
                help="Save a text-only model. It can't be used to encode images")
ap.add_argument("--vision-only", action="store_true", required=False,
                help="Save a vision-only model. It can't be used to encode texts")
-ap.add_argument("--clip_model_is_vision", action="store_true", required=False,
+ap.add_argument("--clip-model-is-vision", action="store_true", required=False,
                help="The clip model is a pure vision model (ShareGPT4V vision extract for example)")
+ap.add_argument("--clip-model-is-openclip", action="store_true", required=False,
+                help="The clip model is from openclip (for ViT-SO400M type))")
ap.add_argument("--llava-projector", help="Path to llava.projector file. If specified, save an image encoder for LLaVA models.")
ap.add_argument("--projector-type", help="Type of projector. Possible values: mlp, ldp", choices=["mlp", "ldp"], default="mlp")
-ap.add_argument("--image-mean", nargs=3, type=float, required=False, help="Override image mean values")
-ap.add_argument("--image-std", nargs=3, type=float, required=False, help="Override image std values")
ap.add_argument("-o", "--output-dir", help="Directory to save GGUF files. Default is the original model directory", default=None)
# Example --image_mean 0.48145466 0.4578275 0.40821073 --image_std 0.26862954 0.26130258 0.27577711
+# Example --image_mean 0.5 0.5 0.5 --image_std 0.5 0.5 0.5
default_image_mean = [0.48145466, 0.4578275, 0.40821073]
default_image_std = [0.26862954, 0.26130258, 0.27577711]
-ap.add_argument('--image_mean', type=float, nargs='+', help='Mean of the images for normalization (overrides processor) ', default=None)
-ap.add_argument('--image_std', type=float, nargs='+', help='Standard deviation of the images for normalization (overrides processor)', default=None)
+ap.add_argument('--image-mean', type=float, nargs='+', help='Mean of the images for normalization (overrides processor) ', default=None)
+ap.add_argument('--image-std', type=float, nargs='+', help='Standard deviation of the images for normalization (overrides processor)', default=None)

# with proper
args = ap.parse_args()
@@ -105,7 +106,7 @@ if args.use_f32:
# output in the same directory as the model if output_dir is None
dir_model = args.model_dir

-if args.clip_model_is_vision:
+if args.clip_model_is_vision or not os.path.exists(dir_model + "/vocab.json") or args.clip_model_is_openclip:
    vocab = None
    tokens = None
else:
@@ -133,7 +134,7 @@ ftype = 1
if args.use_f32:
    ftype = 0

-if args.clip_model_is_vision:
+if args.clip_model_is_vision or args.clip_model_is_openclip:
    model = CLIPVisionModel.from_pretrained(dir_model)
    processor = None
else:
@@ -202,6 +203,57 @@ if has_vision_encoder:
    fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, VISION), v_hparams["layer_norm_eps"])
    block_count = v_hparams["num_hidden_layers"] - 1 if has_llava_projector else v_hparams["num_hidden_layers"]
    fout.add_uint32(k(KEY_BLOCK_COUNT, VISION), block_count)
+    # /**
+    #     "image_grid_pinpoints": [
+    #     [
+    #       336,
+    #       672
+    #     ],
+    #     [
+    #       672,
+    #       336
+    #     ],
+    #     [
+    #       672,
+    #       672
+    #     ],
+    #     [
+    #       1008,
+    #       336
+    #     ],
+    #     [
+    #       336,
+    #       1008
+    #     ]
+    #   ],
+    # Flattened:
+    # [
+    #  336, 672,
+    #  672, 336,
+    #  672, 672,
+    #  1008, 336,
+    #  336, 1008
+    # ]
+    # *
+    # */
+    if "image_grid_pinpoints" in v_hparams:
+        # flatten it
+        image_grid_pinpoints = []
+        for pinpoint in v_hparams["image_grid_pinpoints"]:
+            for p in pinpoint:
+                image_grid_pinpoints.append(p)
+        fout.add_array("clip.vision.image_grid_pinpoints", image_grid_pinpoints)
+    if "image_crop_resolution" in v_hparams:
+        fout.add_uint32("clip.vision.image_crop_resolution", v_hparams["image_crop_resolution"])
+    if "image_aspect_ratio" in v_hparams:
+        fout.add_string("clip.vision.image_aspect_ratio", v_hparams["image_aspect_ratio"])
+    if "image_split_resolution" in v_hparams:
+        fout.add_uint32("clip.vision.image_split_resolution", v_hparams["image_split_resolution"])
+    if "mm_patch_merge_type" in v_hparams:
+        fout.add_string("clip.vision.mm_patch_merge_type", v_hparams["mm_patch_merge_type"])
+    if "mm_projector_type" in v_hparams:
+        fout.add_string("clip.vision.mm_projector_type", v_hparams["mm_projector_type"])


    if processor is not None:
        image_mean = processor.image_processor.image_mean if args.image_mean is None or args.image_mean == default_image_mean else args.image_mean
@@ -155,11 +155,29 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
        system_prompt = prompt.substr(0, image_pos);
        user_prompt = prompt.substr(image_pos + std::string("<image>").length());
        printf("system_prompt: %s\n", system_prompt.c_str());
+        if (params->verbose_prompt) {
+            auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
+            for (int i = 0; i < (int) tmp.size(); i++) {
+                printf("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
+            }
+        }
        printf("user_prompt: %s\n", user_prompt.c_str());
+        if (params->verbose_prompt) {
+            auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
+            for (int i = 0; i < (int) tmp.size(); i++) {
+                printf("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
+            }
+        }
    } else {
        // llava-1.5 native mode
        system_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:";
        user_prompt = prompt + "\nASSISTANT:";
+        if (params->verbose_prompt) {
+            auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
+            for (int i = 0; i < (int) tmp.size(); i++) {
+                printf("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
+            }
+        }
    }

    eval_string(ctx_llava->ctx_llama, system_prompt.c_str(), params->n_batch, &n_past, add_bos);
@@ -171,13 +189,17 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
    fprintf(stderr, "\n");

    struct llama_sampling_context * ctx_sampling = llama_sampling_init(params->sparams);
+    std::string response = "";
    for (int i = 0; i < max_tgt_len; i++) {
        const char * tmp = sample(ctx_sampling, ctx_llava->ctx_llama, &n_past);
+        response += tmp;
        if (strcmp(tmp, "</s>") == 0) break;
        if (strstr(tmp, "###")) break; // Yi-VL behavior

        printf("%s", tmp);
+        if (strstr(response.c_str(), "<|im_end|>")) break; // Yi-34B llava-1.6 - for some reason those decode not as the correct token (tokenizer works)
+        if (strstr(response.c_str(), "<|im_start|>")) break; // Yi-34B llava-1.6
+        if (strstr(response.c_str(), "USER:")) break; // mistral llava-1.6

        fflush(stdout);
    }
@@ -196,7 +218,8 @@ static struct llava_context * llava_init(gpt_params * params) {

    auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);

-    llama_backend_init(params->numa);
+    llama_backend_init();
+    llama_numa_init(params->numa);

    llama_model_params model_params = llama_model_params_from_gpt_params(*params);
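The accumulated `response` string above implements a simple substring-based stop condition. A compact sketch of the same idea follows; the helper name is made up for illustration, and it folds the per-piece checks ("</s>", "###") together with the accumulated-response checks rather than mirroring the loop exactly:

```cpp
// Minimal sketch of substring-based stopping, as used in the generation loop above.
#include <cstring>
#include <string>

static bool hit_stop_string(const std::string & response) {
    const char * stops[] = { "</s>", "###", "<|im_end|>", "<|im_start|>", "USER:" };
    for (const char * s : stops) {
        if (strstr(response.c_str(), s) != nullptr) {
            return true; // stop generating once any stop marker appears in the output
        }
    }
    return false;
}
```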
examples/llava/llava-surgery-v2.py (new file, 167 lines)
@@ -0,0 +1,167 @@
+import argparse
+import glob
+import os
+import torch
+from safetensors.torch import load as safe_load, save as safe_save, safe_open, save_file
+
+# Function to determine if file is a SafeTensor file
+def is_safetensor_file(file_path):
+    return file_path.endswith('.safetensors')
+
+
+# Unified loading function
+def load_model(file_path):
+    if is_safetensor_file(file_path):
+        tensors = {}
+        with safe_open(file_path, framework="pt", device="cpu") as f:
+            for key in f.keys():
+                tensors[key] = f.get_tensor(key).clone()
+                # output shape
+                print(f"{key} : {tensors[key].shape}")
+        return tensors, 'safetensor'
+    else:
+        return torch.load(file_path, map_location=torch.device('cpu')), 'pytorch'
+
+
+# Unified saving function
+def save_model(model, file_path, file_type):
+    if file_type == 'safetensor':
+        # safe_save(model, file_path)
+        save_file(model, file_path)
+    else:
+        torch.save(model, file_path)
+
+
+# Adapted function to clean vision tower from checkpoint
+def clean_vision_tower_from_checkpoint(checkpoint_path):
+    checkpoint, file_type = load_model(checkpoint_path)
+    # file_type = 'pytorch'
+    model_path = os.path.dirname(checkpoint_path)
+    print(f"Searching for vision tower tensors in {checkpoint_path}")
+    clip_tensors = [k for k, v in checkpoint.items() if (k.startswith("model.vision_tower") or k.startswith("vit."))]
+
+    if len(clip_tensors) > 0:
+        print(f"Found {len(clip_tensors)} tensors to extract from {checkpoint_path}")
+        # Adapted for file type
+        clip_path = os.path.join(model_path, "llava.clip")
+
+        if os.path.exists(clip_path):
+            print(f"Loading existing llava.clip from {clip_path}")
+            existing_clip, _ = load_model(clip_path)
+        else:
+            print(f"Creating new llava.clip at {clip_path}")
+            existing_clip = {}
+        # Update existing_clip with new tensors, avoid duplicates
+        for name in clip_tensors:
+            simple_name = name[name.index('vision_model.'):] if 'vision_model.' in name else name
+            print(f"Adding {simple_name} to llava.clip")
+            if simple_name not in existing_clip:
+                existing_clip[simple_name] = checkpoint[name]
+
+        # Save the updated clip tensors back to llava.clip
+        save_model(existing_clip, clip_path, 'pytorch')
+
+        # Remove the tensors from the original checkpoint
+        for name in clip_tensors:
+            del checkpoint[name]
+
+        # Save the updated checkpoint
+        checkpoint_path = checkpoint_path
+        save_model(checkpoint, checkpoint_path, file_type)
+        return True
+    return False
+
+def find_relevant_checkpoints(checkpoint_paths, newline_criteria, projector):
+    newline_checkpoint_path = None
+    projector_checkpoint_path = None
+
+    for path in checkpoint_paths:
+        checkpoint, _ = load_model(path)
+        if newline_criteria(checkpoint) and newline_checkpoint_path is None:
+            newline_checkpoint_path = path
+        if projector(checkpoint):
+            projector_checkpoint_path = path
+
+    return newline_checkpoint_path, projector_checkpoint_path
+
+def newline_criteria(checkpoint):
+    return any(k.startswith("model.image_newline") for k in checkpoint.keys())
+
+def proj_criteria(checkpoint):
+    return any(k.startswith("model.mm_projector") or k.startswith("vision_proj.") for k in checkpoint.keys())
+
+
+# Command-line interface setup
+ap = argparse.ArgumentParser()
+ap.add_argument("-m", "--model", required=True, help="Path to LLaVA v1.5+ model")
+ap.add_argument("-C", "--clean-vision-tower", action="store_true", help="Remove any vision tower from the model files")
+args = ap.parse_args()
+
+if args.clean_vision_tower:
+    # Generalized to handle both PyTorch and SafeTensors models
+    model_files = sorted(glob.glob(f"{args.model}/*"), key=os.path.getmtime, reverse=True)
+    # checkpoint_paths = [path for path in model_files if (path.endswith('.bin') and path.startswith('pytorch')) or (path.endswith('.safetensors') and path.startswith('model'))]
+    checkpoint_paths = [path for path in model_files if (path.endswith('.bin') and 'pytorch' in path.split('/')[-1].split('\\')[-1]) or (path.endswith('.safetensors') and 'model' in path.split('/')[-1].split('\\')[-1])]
+    for projector_checkpoint_path in checkpoint_paths:
+        print(f"Cleaning {projector_checkpoint_path}")
+        if not clean_vision_tower_from_checkpoint(projector_checkpoint_path):
+            print(f"No vision tower found in {projector_checkpoint_path}")
+            # we break once none is found, so far all models append them at the end
+            # break
+    print("Done! All vision tower tensors are removed from the model files and stored in llava.clip file.")
+
+# Now we look for the projector in the last checkpoint
+model_files = sorted(glob.glob(f"{args.model}/*"), key=os.path.getmtime, reverse=True)
+checkpoint_paths = [path for path in model_files if (path.endswith('.bin') and 'pytorch' in path.split('/')[-1].split('\\')[-1]) or (path.endswith('.safetensors') and 'model' in path.split('/')[-1].split('\\')[-1])]
+# last_checkpoint_path = checkpoint_paths[0]
+# first_checkpoint_path = checkpoint_paths[-1]
+newline_checkpoint_path, projector_checkpoint_path = find_relevant_checkpoints(checkpoint_paths, newline_criteria, proj_criteria)
+
+print(f"Taking projector from {projector_checkpoint_path}")
+first_mm_tensors = []
+first_checkpoint = None
+if newline_checkpoint_path is not None:
+    print(f"Taking newline from {newline_checkpoint_path}")
+    first_checkpoint, file_type = load_model(newline_checkpoint_path)
+    first_mm_tensors = [k for k, v in first_checkpoint.items() if k.startswith("model.image_newline")]
+
+# Load the checkpoint
+mm_tensors = []
+last_checkpoint = None
+if projector_checkpoint_path is not None:
+    last_checkpoint, file_type = load_model(projector_checkpoint_path)
+    mm_tensors = [k for k, v in last_checkpoint.items() if k.startswith("model.mm_projector") or k.startswith("vision_proj.")]
+
+if len(mm_tensors) == 0:
+    if last_checkpoint is not None:
+        for k, v in last_checkpoint.items():
+            print(k)
+    print(f"Found {len(mm_tensors)} tensors to extract out of {len(last_checkpoint)} tensors.")
+    print("No tensors found. Is this a LLaVA model?")
+    exit()
+
+print(f"Found {len(mm_tensors)} tensors to extract.")
+print(f"Found additional {len(first_mm_tensors)} tensors to extract.")
+# projector = {name: checkpoint.[name].float() for name in mm_tensors}
+projector = {}
+for name in mm_tensors:
+    projector[name] = last_checkpoint[name].float()
+for name in first_mm_tensors:
+    projector[name] = first_checkpoint[name].float()
+
+if len(projector) > 0:
+    save_model(projector, f"{args.model}/llava.projector", 'pytorch')
+
+for name in mm_tensors:
+    del last_checkpoint[name]
+for name in first_mm_tensors:
+    del first_checkpoint[name]
+
+if len(mm_tensors) > 0:
+    save_model(last_checkpoint, projector_checkpoint_path, file_type)
+if len(first_mm_tensors) > 0:
+    save_model(first_checkpoint, newline_checkpoint_path, file_type)
+
+print("Done!")
+print(f"Now you can convert {args.model} to a a regular LLaMA GGUF file.")
+print(f"Also, use {args.model}/llava.projector to prepare a llava-encoder.gguf file.")
@ -2,31 +2,295 @@
|
||||||
#include "common.h"
|
#include "common.h"
|
||||||
#include "llama.h"
|
#include "llama.h"
|
||||||
#include "llava.h"
|
#include "llava.h"
|
||||||
|
#include "base64.hpp"
|
||||||
|
|
||||||
#include <cstdio>
|
#include <cstdio>
|
||||||
#include <cstdlib>
|
#include <cstdlib>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
#include <numeric>
|
||||||
|
|
||||||
|
// RGB uint8 image
|
||||||
|
struct clip_image_u8 {
|
||||||
|
int nx;
|
||||||
|
int ny;
|
||||||
|
|
||||||
|
std::vector<uint8_t> buf;
|
||||||
|
};
|
||||||
|
|
||||||
|
// RGB float32 image (NHWC)
|
||||||
|
// Memory layout: RGBRGBRGB...
|
||||||
|
struct clip_image_f32 {
|
||||||
|
int nx;
|
||||||
|
int ny;
|
||||||
|
|
||||||
|
std::vector<float> buf;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct clip_image_grid_shape {
|
||||||
|
int first;
|
||||||
|
int second;
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Selects the best resolution from a list of possible resolutions based on the original size.
|
||||||
|
*
|
||||||
|
* @param original_size The original size of the image in the format (width, height).
|
||||||
|
* @param possible_resolutions A list of possible resolutions in the format [(width1, height1), (width2, height2), ...].
|
||||||
|
* @return The best fit resolution in the format (width, height).
|
||||||
|
*/
|
||||||
|
static std::pair<int, int> select_best_resolution(const std::pair<int, int>& original_size, const std::vector<std::pair<int, int>>& possible_resolutions) {
|
||||||
|
int original_width = original_size.first;
|
||||||
|
int original_height = original_size.second;
|
||||||
|
|
||||||
|
std::pair<int, int> best_fit;
|
||||||
|
int max_effective_resolution = 0;
|
||||||
|
int min_wasted_resolution = std::numeric_limits<int>::max();
|
||||||
|
|
||||||
|
for (const auto& resolution : possible_resolutions) {
|
||||||
|
int width = resolution.first;
|
||||||
|
int height = resolution.second;
|
||||||
|
float scale = std::min(static_cast<float>(width) / original_width, static_cast<float>(height) / original_height);
|
||||||
|
int downscaled_width = static_cast<int>(original_width * scale);
|
||||||
|
int downscaled_height = static_cast<int>(original_height * scale);
|
||||||
|
int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height);
|
||||||
|
int wasted_resolution = (width * height) - effective_resolution;
|
||||||
|
// fprintf(stderr, "resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, downscaled_width, downscaled_height, effective_resolution, wasted_resolution);
|
||||||
|
if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) {
|
||||||
|
max_effective_resolution = effective_resolution;
|
||||||
|
min_wasted_resolution = wasted_resolution;
|
||||||
|
best_fit = resolution;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return best_fit;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Get the anyres image grid shape object
|
||||||
|
*
|
||||||
|
* @param image_size
|
||||||
|
* @param grid_pinpoints
|
||||||
|
* @param image_patch_size
|
||||||
|
* @return <int, int>
|
||||||
|
*/
|
||||||
|
static struct clip_image_grid_shape get_anyres_image_grid_shape(const std::pair<int, int> & image_size, const std::vector<std::pair<int, int>> & grid_pinpoints, int image_patch_size) {
|
||||||
|
/**
|
||||||
|
Conversion from gguf flat array to vector:
|
||||||
|
std::vector<std::pair<int, int>> possible_resolutions;
|
||||||
|
for (int i = 0; i < 32 && params.image_grid_pinpoints[i] != 0; i+=2) {
|
||||||
|
possible_resolutions.push_back({params.image_grid_pinpoints[i], params.image_grid_pinpoints[i+1]});
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
auto best_resolution = select_best_resolution(image_size, grid_pinpoints);
|
||||||
|
return {best_resolution.first / image_patch_size, best_resolution.second / image_patch_size};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Take the image segments in a grid configuration and return the embeddings and the number of embeddings into preallocated memory (image_embd_out)
|
||||||
|
static bool clip_llava_handle_patches(clip_ctx * ctx_clip, std::vector<float *> & image_embd_v, struct clip_image_grid_shape grid_shape, float * image_embd_out, int * n_img_pos_out) {
|
||||||
|
struct {
|
||||||
|
struct ggml_tensor * newline;
|
||||||
|
struct ggml_context * ctx;
|
||||||
|
} model;
|
||||||
|
|
||||||
|
const int32_t image_size = clip_image_size(ctx_clip);
|
||||||
|
const int32_t patch_size = clip_patch_size(ctx_clip);
|
||||||
|
|
||||||
|
int32_t num_patches_per_side = image_size / patch_size; // 336 / 14 = 24 - used for embedding-patching boxes (24*24 = 576 patches)
|
||||||
|
|
||||||
|
int num_patches_width = grid_shape.first; // grid 1-4
|
||||||
|
int num_patches_height = grid_shape.second; // grid 1-4
|
||||||
|
|
||||||
|
const size_t num_images = num_patches_width * num_patches_height + 1;
|
||||||
|
|
||||||
|
// TODO: size calculation is not calculated - it's only tens of MB
|
||||||
|
size_t ctx_size = 0;
|
||||||
|
|
||||||
|
{
|
||||||
|
ctx_size += clip_embd_nbytes(ctx_clip) * num_images * 8; // image_features
|
||||||
|
ctx_size += 1024*1024 * ggml_type_size(GGML_TYPE_F32);
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ggml_init_params params {
|
||||||
|
/*.mem_size =*/ ctx_size,
|
||||||
|
/*.mem_buffer =*/ NULL,
|
||||||
|
/*.no_alloc =*/ false, // NOTE: this should be false when using the legacy API
|
||||||
|
};
|
||||||
|
|
||||||
|
// Python reference code for full unpad:
|
||||||
|
/*
|
||||||
|
base_image_feature = image_feature[0]
|
||||||
|
image_feature = image_feature[1:]
|
||||||
|
image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
|
||||||
|
image_feature = image_feature.flatten(1, 2).flatten(2, 3)
|
||||||
|
image_feature = unpad_image(image_feature, image_sizes[image_idx])
|
||||||
|
image_feature = torch.cat((
|
||||||
|
image_feature,
|
||||||
|
self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1)
|
||||||
|
), dim=-1)
|
||||||
|
image_feature = image_feature.flatten(1, 2).transpose(0, 1)
|
||||||
|
image_feature = torch.cat((base_image_feature, image_feature), dim=0)
|
||||||
|
*/
|
||||||
|
// We now have two options: unpad or no unpad. Unpad removes tokens for faster llm eval.
|
||||||
|
// In terms of result quality it appears to make no difference, so we'll start with the easier approach given 5D tensors are not supported in ggml yet.
|
||||||
|
// Without unpad we have to split the sub-image embeddings into patches of 24 features each and permute them.
|
||||||
|
// Once all images are processed to prepended the base_image_features without any changes.
|
||||||
|
|
||||||
|
// Pytorch reference simplified, modified for ggml compatibility - confirmed identical output in python (for a 2x2 grid image (676x676 scaling))
|
||||||
|
/*
|
||||||
|
        image_feature = image_feature.view(2, 2, 24, 24, 4096)
        image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous()
        image_feature = image_feature.view(2, 24, 2, 24, 4096)
        image_feature = image_feature.flatten(0, 3)

        // Reshape to 4D tensor by merging the last two dimensions
        image_feature = image_feature.view(2, 2, 24, 24*4096)
        image_feature = image_feature.permute(0, 2, 1, 3).contiguous()
        image_feature = image_feature.view(-1, 4096)
    */

    model.ctx = ggml_init(params);

    ggml_tensor * newline_tmp = clip_get_newline_tensor(ctx_clip);
    model.newline = ggml_new_tensor_1d(model.ctx, GGML_TYPE_F32, newline_tmp->ne[0]);
    if (newline_tmp->backend != GGML_BACKEND_CPU) {
        if (newline_tmp->buffer == NULL) {
            printf("newline_tmp tensor buffer is NULL\n");
        }
        ggml_backend_tensor_get(newline_tmp, model.newline->data, 0, ggml_nbytes(newline_tmp));
    } else {
        model.newline->data = newline_tmp->data;
        if (model.newline->data == NULL) {
            printf("newline_tmp tensor data is NULL\n");
        }
    }

    struct ggml_tensor * image_features = ggml_new_tensor_3d(model.ctx, GGML_TYPE_F32, clip_n_mmproj_embd(ctx_clip), clip_n_patches(ctx_clip), num_images - 1); // example: 4096 x 576 x 4
    // ggml_tensor_printf(image_features,"image_features",__LINE__,false,false);
    // fill it with the image embeddings, ignoring the base
    for (size_t i = 1; i < num_images; i++) {
        size_t offset = (i-1) * clip_embd_nbytes(ctx_clip);
        memcpy((uint8_t *)(image_features->data) + offset, image_embd_v[i], clip_embd_nbytes(ctx_clip));
    }

    struct ggml_cgraph * gf = ggml_new_graph(model.ctx);
    size_t size_ele = ggml_type_size(GGML_TYPE_F32);

    struct ggml_tensor *image_features_patchview = ggml_view_4d(model.ctx, image_features,
                                                                num_patches_per_side * clip_n_mmproj_embd(ctx_clip),
                                                                num_patches_per_side,
                                                                num_patches_width,
                                                                num_patches_height,
                                                                size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip),
                                                                size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip) * num_patches_per_side,
                                                                size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip) * num_patches_per_side * num_patches_width, 0);
    // ggml_tensor_printf(image_features_patchview,"image_features_patchview",__LINE__,false,false);
    struct ggml_tensor *permuted_cont = ggml_cont(model.ctx, ggml_permute(model.ctx, image_features_patchview, 0, 2, 1, 3));
    /**
        At the end of each row we have to add the row_end embeddings, which are the same as the newline embeddings
            image_feature = torch.cat((
                image_feature,
                self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device)
            ), dim=-1)
     *
     */

    // ggml_tensor_printf(permuted_cont,"permuted_cont",__LINE__,false,false);
    struct ggml_tensor *flatten = ggml_view_2d(model.ctx, permuted_cont, clip_n_mmproj_embd(ctx_clip), num_patches_height * num_patches_width * num_patches_per_side * num_patches_per_side, size_ele * clip_n_mmproj_embd(ctx_clip), 0);
    // ggml_tensor_printf(flatten,"flatten",__LINE__,false,false);
    ggml_build_forward_expand(gf, flatten);
    ggml_graph_compute_with_ctx(model.ctx, gf, 1);
    struct ggml_tensor* result = gf->nodes[gf->n_nodes - 1];

    memcpy(image_embd_out, image_embd_v[0], clip_embd_nbytes(ctx_clip)); // main image as global context
    // append without newline tokens (default behavior in llava_arch when not using unpad ):
    memcpy(image_embd_out + clip_n_patches(ctx_clip) * clip_n_mmproj_embd(ctx_clip), (float*)result->data, clip_embd_nbytes(ctx_clip) * (num_images-1)); // grid patches
    *n_img_pos_out = static_cast<int>(result->ne[1]+clip_n_patches(ctx_clip));

    // Debug: Test single segments
    // Current findings: sending base image, sending a segment embedding all works similar to python
    // However, permuted embeddings do not work yet (stride issue?)
    // memcpy(image_embd_out, image_embd_v[0], clip_embd_nbytes(ctx_clip)); // main image as context
    // memcpy(image_embd_out, (float*)prepared_cont->data, clip_embd_nbytes(ctx_clip)); // main image as context
    // *n_img_pos_out=576;

    ggml_free(model.ctx);
    return true;
}

#include "base64.hpp"
static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_pos) {
    clip_image_f32 * img_res = clip_image_f32_init();
    // std::vector<clip_image_f32*> img_res_v; // format VectN x H x W x RGB (N x 336 x 336 x 3), so interleaved RGB - different to the python implementation which is N x 3 x 336 x 336
    if (!clip_image_preprocess(ctx_clip, img, img_res, /*pad2square =*/ true)) {
    clip_image_f32_batch img_res_v;
    img_res_v.size = 0;
    img_res_v.data = nullptr;
    if (!clip_image_preprocess(ctx_clip, img, img_res_v)) {
        fprintf(stderr, "%s: unable to preprocess image\n", __func__);
        clip_image_f32_free(img_res);
        delete[] img_res_v.data;
        return false;
    }

    *n_img_pos = clip_n_patches(ctx_clip);

    const int64_t t_img_enc_start_us = ggml_time_us();
    bool encoded = clip_image_encode(ctx_clip, n_threads, img_res, image_embd);
    clip_image_f32_free(img_res);
    const char * mm_patch_merge_type = clip_patch_merge_type(ctx_clip);

    if (strcmp(mm_patch_merge_type, "spatial_unpad") != 0) {
        // flat / default llava-1.5 type embedding
        *n_img_pos = clip_n_patches(ctx_clip);
        bool encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[0], image_embd); // image_embd shape is 576 x 4096
        delete[] img_res_v.data;
        if (!encoded) {
            fprintf(stderr, "Unable to encode image\n");
            return false;
        }
    } else {
        // spatial_unpad llava-1.6 type embedding
        // TODO: CLIP needs batching support - in HF the llm projection is separate after encoding, which might be a solution to quickly get batching working
        std::vector<float *> image_embd_v;
        image_embd_v.resize(img_res_v.size);
        for (size_t i = 0; i < img_res_v.size; i++) {
            image_embd_v[i] = (float *)malloc(clip_embd_nbytes(ctx_clip)); // 576 patches * 4096 embeddings * 4 bytes = 9437184
            const bool encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[i], image_embd_v[i]); // image data is in 3x336x336 format and will be converted to 336x336x3 inside
            if (!encoded) {
                fprintf(stderr, "Unable to encode image - spatial_unpad - subimage %d of %d\n", (int) i+1, (int) img_res_v.size);
                return false;
            }
        }
        const int64_t t_img_enc_batch_us = ggml_time_us();
        printf("%s: %d segments encoded in %8.2f ms\n", __func__, (int)img_res_v.size, (t_img_enc_batch_us - t_img_enc_start_us) / 1000.0);

        const int32_t * image_grid = clip_image_grid(ctx_clip);

        std::vector<std::pair<int, int>> grid_pinpoints;
        for (int i = 0; i < 32 && image_grid[i] != 0; i += 2) {
            grid_pinpoints.push_back({image_grid[i], image_grid[i+1]});
        }

        // free all img_res_v - not needed anymore
        delete[] img_res_v.data;
        img_res_v.size = 0;
        img_res_v.data = nullptr;

        const int32_t image_size = clip_image_size(ctx_clip);

        struct clip_image_grid_shape grid_shape = get_anyres_image_grid_shape({img->nx,img->ny}, grid_pinpoints, image_size);

        int n_img_pos_out;
        clip_llava_handle_patches(ctx_clip, image_embd_v, grid_shape, image_embd, &n_img_pos_out);
        *n_img_pos = n_img_pos_out;

        for (size_t i = 0; i < image_embd_v.size(); i++) {
            free(image_embd_v[i]);
        }
        image_embd_v.clear();

        // debug image/segment/normalization content:
        // clip_image_u8 * tmp = clip_image_u8_init();
        // clip_image_convert_f32_to_u8(*image_feature, *tmp);
        // clip_image_save_to_bmp(*tmp, "image_feature.bmp");
    }

    printf("%s: image embedding created: %d tokens\n", __func__, *n_img_pos);

    const int64_t t_img_enc_end_us = ggml_time_us();
    float t_img_enc_ms = (t_img_enc_end_us - t_img_enc_start_us) / 1000.0;
@ -48,10 +312,9 @@ bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx *
}

static bool llava_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out) {
    float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip));
    float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)*6); // TODO: base on gridsize/llava model
    if (!image_embd) {
        fprintf(stderr, "Unable to allocate memory for image embeddings\n");
        free(image_embd);
        return false;
    }

@ -85,7 +348,7 @@ bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_
    return true;
}

LLAVA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length) {
struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length) {
    clip_image_u8 * img = clip_image_u8_init();
    if (!clip_image_load_from_bytes(image_bytes, image_bytes_length, img)) {
        clip_image_u8_free(img);

@ -142,7 +405,7 @@ static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long
    return true;
}

LLAVA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path) {
struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path) {
    unsigned char* image_bytes;
    long image_bytes_length;
    auto loaded = load_file_to_bytes(image_path, &image_bytes, &image_bytes_length);

@ -151,13 +414,13 @@ LLAVA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct
        return NULL;
    }

    auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, image_bytes, image_bytes_length);
    llava_image_embed *embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, image_bytes, image_bytes_length);
    free(image_bytes);

    return embed;
}

LLAVA_API void llava_image_embed_free(struct llava_image_embed * embed) {
void llava_image_embed_free(struct llava_image_embed * embed) {
    free(embed->embed);
    free(embed);
}

@ -3,7 +3,6 @@

#include "ggml.h"

#ifdef LLAMA_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef LLAMA_BUILD

@ -42,7 +41,6 @@ LLAVA_API void llava_image_embed_free(struct llava_image_embed * embed);
/** write the image represented by embed into the llama context with batch size n_batch, starting at context pos n_past. on completion, n_past points to the next position in the context after the image embed. */
LLAVA_API bool llava_eval_image_embed(struct llama_context * ctx_llama, const struct llava_image_embed * embed, int n_batch, int * n_past);

#ifdef __cplusplus
}
#endif
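For orientation, a minimal usage sketch of the llava.h entry points touched above. It is not part of this commit; the contexts, the image path and the thread/batch values are placeholders assumed to be set up elsewhere.

// hedged sketch, not from the diff: the intended call order for the llava.h API above
#include "llava.h"

static bool embed_one_image(struct clip_ctx * ctx_clip, struct llama_context * ctx_llama, const char * path) {
    struct llava_image_embed * embed = llava_image_embed_make_with_filename(ctx_clip, /*n_threads=*/4, path);
    if (embed == NULL) {
        return false;
    }
    int n_past = 0;
    // writes the image tokens into the context; on success n_past ends up just past them
    bool ok = llava_eval_image_embed(ctx_llama, embed, /*n_batch=*/512, &n_past);
    llava_image_embed_free(embed);
    return ok;
}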
@ -54,7 +54,8 @@ int main(int argc, char ** argv) {
#endif // LOG_DISABLE_LOGS

    // init llama.cpp
    llama_backend_init(params.numa);
    llama_backend_init();
    llama_numa_init(params.numa);

    llama_model * model = NULL;
    llama_context * ctx = NULL;

@ -31,7 +31,8 @@ int main(int argc, char ** argv){
#endif // LOG_DISABLE_LOGS

    // init llama.cpp
    llama_backend_init(params.numa);
    llama_backend_init();
    llama_numa_init(params.numa);

    llama_model * model = NULL;
    llama_context * ctx = NULL;

@ -283,7 +283,11 @@ These options help improve the performance and memory usage of the LLaMA models.

### NUMA support

- `--numa`: Attempt optimizations that help on some systems with non-uniform memory access. This currently consists of pinning an equal proportion of the threads to the cores on each NUMA node, and disabling prefetch and readahead for mmap. The latter causes mapped pages to be faulted in on first access instead of all at once, and in combination with pinning threads to NUMA nodes, more of the pages end up on the NUMA node where they are used. Note that if the model is already in the system page cache, for example because of a previous run without this option, this will have little effect unless you drop the page cache first. This can be done by rebooting the system or on Linux by writing '3' to '/proc/sys/vm/drop_caches' as root.
- `--numa distribute`: Pin an equal proportion of the threads to the cores on each NUMA node. This will spread the load amongst all cores on the system, utilizing all memory channels at the expense of potentially requiring memory to travel over the slow links between nodes.
- `--numa isolate`: Pin all threads to the NUMA node that the program starts on. This limits the number of cores and amount of memory that can be used, but guarantees all memory access remains local to the NUMA node.
- `--numa numactl`: Pin threads to the CPUMAP that is passed to the program by starting it with the numactl utility. This is the most flexible mode, and allows arbitrary core usage patterns, for example a map that uses all the cores on one NUMA node, and just enough cores on a second node to saturate the inter-node memory bus.

These flags attempt optimizations that help on some systems with non-uniform memory access. This currently consists of one of the above strategies, and disabling prefetch and readahead for mmap. The latter causes mapped pages to be faulted in on first access instead of all at once, and in combination with pinning threads to NUMA nodes, more of the pages end up on the NUMA node where they are used. Note that if the model is already in the system page cache, for example because of a previous run without this option, this will have little effect unless you drop the page cache first. This can be done by rebooting the system or on Linux by writing '3' to '/proc/sys/vm/drop_caches' as root.
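As a rough illustration of how these options reach the backend after this change, the sketch below (not part of the diff; unknown-value handling is simplified, the real parsers reject unrecognized values) maps a `--numa` argument onto the new `ggml_numa_strategy` enum and the split init calls.

#include "ggml.h"   // enum ggml_numa_strategy
#include "llama.h"  // llama_backend_init, llama_numa_init
#include <string>

// pick the strategy for a given --numa argument
static enum ggml_numa_strategy numa_strategy_from_arg(const std::string & value) {
    if (value == "distribute" || value.empty()) { return GGML_NUMA_STRATEGY_DISTRIBUTE; }
    if (value == "isolate")                     { return GGML_NUMA_STRATEGY_ISOLATE;    }
    return GGML_NUMA_STRATEGY_NUMACTL; // assumes value == "numactl"
}

// llama_backend_init() no longer takes a NUMA flag; NUMA setup is now a separate call:
//     llama_backend_init();
//     llama_numa_init(numa_strategy_from_arg(value));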
### Memory Float 32

@ -186,7 +186,8 @@ int main(int argc, char ** argv) {
    }

    LOG("%s: llama backend init\n", __func__);
    llama_backend_init(params.numa);
    llama_backend_init();
    llama_numa_init(params.numa);

    llama_model * model;
    llama_context * ctx;

@ -124,7 +124,8 @@ int main(int argc, char ** argv) {
#endif // LOG_DISABLE_LOGS

    // init llama.cpp
    llama_backend_init(params.numa);
    llama_backend_init();
    llama_numa_init(params.numa);

    llama_model * model = NULL;
    llama_context * ctx = NULL;

@ -71,7 +71,8 @@ int main(int argc, char ** argv) {

    // init LLM

    llama_backend_init(params.numa);
    llama_backend_init();
    llama_numa_init(params.numa);

    // initialize the model

@ -1810,7 +1810,8 @@ int main(int argc, char ** argv) {
        params.prompt = gpt_random_prompt(rng);
    }

    llama_backend_init(params.numa);
    llama_backend_init();
    llama_numa_init(params.numa);

    llama_model * model;
    llama_context * ctx;

@ -238,7 +238,7 @@ int main(int argc, char ** argv) {
        params.imatrix = &imatrix_data;
    }

    llama_backend_init(false);
    llama_backend_init();

    // parse command line arguments
    const std::string fname_inp = argv[arg_idx];

@ -16,6 +16,13 @@ Command line options:
- `--memory-f32`: Use 32-bit floats instead of 16-bit floats for memory key+value. Not recommended.
- `--mlock`: Lock the model in memory, preventing it from being swapped out when memory-mapped.
- `--no-mmap`: Do not memory-map the model. By default, models are mapped into memory, which allows the system to load only the necessary parts of the model as needed.
- `--numa STRATEGY`: Attempt one of the below optimization strategies that help on some NUMA systems
- `--numa distribute`: Spread execution evenly over all nodes
- `--numa isolate`: Only spawn threads on CPUs on the node that execution started on
- `--numa numactl`: Use the CPU map provided by numactl
  if run without this previously, it is recommended to drop the system page cache before using this
  see https://github.com/ggerganov/llama.cpp/issues/1437
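To show how the distribute and isolate strategies translate into a per-thread node choice, here is a reduced sketch of the logic that the ggml.c hunk further down (set_numa_thread_affinity) implements; the state arguments are stand-ins for ggml's internal NUMA bookkeeping and the function name is illustrative.

#include "ggml.h" // enum ggml_numa_strategy

// illustrative only: which NUMA node a worker thread should be pinned to
static int pick_numa_node(enum ggml_numa_strategy strategy, int thread_n, int n_nodes, int current_node) {
    switch (strategy) {
        case GGML_NUMA_STRATEGY_DISTRIBUTE: return thread_n % n_nodes; // round-robin threads over all nodes
        case GGML_NUMA_STRATEGY_ISOLATE:    return current_node;       // stay on the node the process started on
        default:                            return -1;                 // numactl: keep the cpuset inherited from numactl
    }
}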
- `--numa`: Attempt optimizations that help on some NUMA systems.
- `--lora FNAME`: Apply a LoRA (Low-Rank Adaptation) adapter to the model (implies --no-mmap). This allows you to adapt the pretrained model to specific tasks or domains.
- `--lora-base FNAME`: Optional model to use as a base for the layers modified by the LoRA adapter. This flag is used in conjunction with the `--lora` flag, and specifies the base model for the adaptation.

@ -197,6 +204,8 @@ node index.js

`system_prompt`: Change the system prompt (initial prompt of all slots), this is useful for chat applications. [See more](#change-system-prompt-on-runtime)

`samplers`: The order the samplers should be applied in. An array of strings representing sampler type names. If a sampler is not set, it will not be used. If a sampler is specified more than once, it will be applied multiple times. (default: `["top_k", "tfs_z", "typical_p", "top_p", "min_p", "temperature"]` - these are all the available values)
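Condensed from the server.cpp hunk below, this hedged sketch shows what happens with the `samplers` array; the names here are placeholders for whatever the request sends, and the second argument simply mirrors the call made in this change.

#include "common.h"   // sampler_types_from_names, sampler_type_to_name_string
#include <string>
#include <vector>

// placeholder request values; the server collects these from the "samplers" JSON array
std::vector<std::string> sampler_names = { "top_k", "min_p", "temperature" };
// false matches the server call below; the flag's exact meaning is not shown in this diff
auto samplers_sequence = sampler_types_from_names(sampler_names, false);
// the per-slot settings JSON reports the active order back via sampler_type_to_name_string()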
### Result JSON

- Note: When using streaming mode (`stream`) only `content` and `stop` will be returned until end of completion.

@ -437,10 +437,6 @@ struct llama_server_context
        default_generation_settings_for_props["seed"] = -1;

        batch = llama_batch_init(n_ctx, 0, params.n_parallel);

        // empty system prompt
        system_prompt = "";
        system_tokens.clear();
    }

    std::vector<llama_token> tokenize(const json & json_prompt, bool add_bos) const

@ -677,6 +673,24 @@ struct llama_server_context
            }
        }

        const auto &samplers_sequence = data.find("samplers");
        if (samplers_sequence != data.end() && samplers_sequence->is_array())
        {
            std::vector<std::string> sampler_names;
            for (const auto &sampler_name : *samplers_sequence)
            {
                if (sampler_name.is_string())
                {
                    sampler_names.emplace_back(sampler_name);
                }
            }
            slot->sparams.samplers_sequence = sampler_types_from_names(sampler_names, false);
        }
        else
        {
            slot->sparams.samplers_sequence = default_sparams.samplers_sequence;
        }

        if (multimodal)
        {
            const auto &images_data = data.find("image_data");

@ -766,13 +780,15 @@ struct llama_server_context
    }

    void update_system_prompt() {
        kv_cache_clear();
        system_tokens.clear();

        if (!system_prompt.empty()) {
            system_tokens = ::llama_tokenize(ctx, system_prompt, add_bos_token);

            llama_batch_clear(batch);

            kv_cache_clear();
            for (int i = 0; i < (int) system_tokens.size(); ++i)
            for (int i = 0; i < (int)system_tokens.size(); ++i)
            {
                llama_batch_add(batch, system_tokens[i], i, { 0 }, false);
            }

@ -788,6 +804,7 @@ struct llama_server_context
            {
                llama_kv_cache_seq_cp(ctx, 0, i, 0, system_tokens.size());
            }
        }

        LOG_TEE("system prompt updated\n");
        system_need_update = false;

@ -808,11 +825,9 @@ struct llama_server_context
        name_user      = sys_props.value("anti_prompt", "");
        name_assistant = sys_props.value("assistant_name", "");

        if (slots.size() > 0)
        {
            notify_system_prompt_changed();
        }
    }

    static size_t find_stopping_strings(const std::string &text, const size_t last_token_size,
                                        const stop_type type, llama_client_slot &slot)

@ -969,18 +984,31 @@ struct llama_server_context
                {
                    continue;
                }
                clip_image_f32 * img_res = clip_image_f32_init();
                clip_image_f32_batch img_res_v;
                if (!clip_image_preprocess(clp_ctx, img.img_data, img_res, /*pad2square =*/ true))
                img_res_v.size = 0;
                img_res_v.data = nullptr;
                if (!clip_image_preprocess(clp_ctx, img.img_data, img_res_v))
                {
                    LOG_TEE("Error processing the given image");
                    clip_free(clp_ctx);
                    clip_image_f32_batch_free(img_res_v);
                    return false;
                }
                if (img_res_v.size == 0)
                {
                    LOG_TEE("Error processing the given image");
                    return false;
                }

                // note: assumes only one image was returned by clip_image_preprocess
                clip_image_f32 * img_res = img_res_v.data;

                img.image_tokens = clip_n_patches(clp_ctx);
                img.image_embedding = (float *)malloc(clip_embd_nbytes(clp_ctx));
                if (!img.image_embedding)
                {
                    LOG_TEE("Unable to allocate memory for image embeddings\n");
                    clip_image_f32_batch_free(img_res_v);
                    clip_free(clp_ctx);
                    return false;
                }

@ -988,9 +1016,12 @@ struct llama_server_context
                if (!clip_image_encode(clp_ctx, params.n_threads, img_res, img.image_embedding))
                {
                    LOG_TEE("Unable to encode image\n");
                    clip_image_f32_batch_free(img_res_v);
                    return false;
                }
                clip_image_f32_free(img_res);

                clip_image_f32_batch_free(img_res_v);

                img.request_encode_image = false;
            }

@ -1014,6 +1045,12 @@ struct llama_server_context
        const auto eos_bias = slot.sparams.logit_bias.find(llama_token_eos(model));
        const bool ignore_eos = eos_bias != slot.sparams.logit_bias.end() &&
                                eos_bias->second < 0.0f && std::isinf(eos_bias->second);

        std::vector<std::string> samplers_sequence;
        for (const auto &sampler_type : slot.sparams.samplers_sequence)
        {
            samplers_sequence.emplace_back(sampler_type_to_name_string(sampler_type));
        }

        return json {
            {"n_ctx",      slot.n_ctx},
            {"model",      params.model_alias},

@ -1044,6 +1081,7 @@ struct llama_server_context
            {"logit_bias", slot.sparams.logit_bias},
            {"n_probs",    slot.sparams.n_probs},
            {"grammar",    slot.sparams.grammar},
            {"samplers",   samplers_sequence}
        };
    }

@ -1840,7 +1878,10 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
    {
        printf("  --no-mmap             do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
    }
    printf("  --numa                attempt optimizations that help on some NUMA systems\n");
    printf("  --numa TYPE           attempt optimizations that help on some NUMA systems\n");
    printf("                          - distribute: spread execution evenly over all nodes\n");
    printf("                          - isolate: only spawn threads on CPUs on the node that execution started on\n");
    printf("                          - numactl: use the CPU map provided by numactl\n");
    if (llama_supports_gpu_offload()) {
        printf("  -ngl N, --n-gpu-layers N\n");
        printf("                        number of layers to store in VRAM\n");

@ -2249,9 +2290,17 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
    {
        params.use_mmap = false;
    }
    else if (arg == "--numa")
    else if (arg == "--numa") {
    {
        if (++i >= argc) {
        params.numa = true;
            invalid_param = true;
            break;
        } else {
            std::string value(argv[i]);
            /**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
            else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
            else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
            else { invalid_param = true; break; }
        }
    }
    else if (arg == "--embedding")
    {

@ -2482,7 +2531,8 @@ int main(int argc, char **argv)
        params.model_alias = params.model;
    }

    llama_backend_init(params.numa);
    llama_backend_init();
    llama_numa_init(params.numa);

    LOG_INFO("build info", {{"build", LLAMA_BUILD_NUMBER},
                            {"commit", LLAMA_COMMIT}});
@ -31,7 +31,8 @@ int main(int argc, char ** argv) {

    // init LLM

    llama_backend_init(params.numa);
    llama_backend_init();
    llama_numa_init(params.numa);

    // initialize the model

@ -52,7 +52,8 @@ int main(int argc, char ** argv) {
#endif // LOG_DISABLE_LOGS

    // init llama.cpp
    llama_backend_init(params.numa);
    llama_backend_init();
    llama_numa_init(params.numa);

    llama_model * model_tgt = NULL;
    llama_model * model_dft = NULL;

@ -17,7 +17,7 @@ int main(int argc, char ** argv) {

    const bool printing_ids = argc > 3 && std::string(argv[3]) == "--ids";

    llama_backend_init(false);
    llama_backend_init();

    llama_model_params model_params = llama_model_default_params();
    model_params.vocab_only = true;

@ -50,9 +50,9 @@ struct my_llama_layer {
    struct ggml_tensor * ffn_norm;

    // ff
    struct ggml_tensor * w1;
    struct ggml_tensor * ffn_gate; // w1
    struct ggml_tensor * w2;
    struct ggml_tensor * ffn_down; // w2
    struct ggml_tensor * w3;
    struct ggml_tensor * ffn_up; // w3
};

struct my_llama_model {

@ -140,9 +140,9 @@ static void set_param_model(struct my_llama_model * model) {
        ggml_set_param(ctx, layer.wv);
        ggml_set_param(ctx, layer.wo);
        ggml_set_param(ctx, layer.ffn_norm);
        ggml_set_param(ctx, layer.w1);
        ggml_set_param(ctx, layer.ffn_gate);
        ggml_set_param(ctx, layer.w2);
        ggml_set_param(ctx, layer.ffn_down);
        ggml_set_param(ctx, layer.w3);
        ggml_set_param(ctx, layer.ffn_up);
    }
}

@ -198,9 +198,9 @@ static void init_model(struct my_llama_model * model) {

        layer.ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);

        layer.w1 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ff);
        layer.ffn_gate = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ff);
        layer.w2 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_ff, n_embd);
        layer.ffn_down = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_ff, n_embd);
        layer.w3 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ff);
        layer.ffn_up = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ff);

        ggml_set_name(layer.attention_norm, tni(LLM_TENSOR_ATTN_NORM, i));

@ -211,9 +211,9 @@ static void init_model(struct my_llama_model * model) {

        ggml_set_name(layer.ffn_norm, tni(LLM_TENSOR_FFN_NORM, i));

        ggml_set_name(layer.w1, tni(LLM_TENSOR_FFN_GATE, i));
        ggml_set_name(layer.ffn_gate, tni(LLM_TENSOR_FFN_GATE, i));
        ggml_set_name(layer.w2, tni(LLM_TENSOR_FFN_DOWN, i));
        ggml_set_name(layer.ffn_down, tni(LLM_TENSOR_FFN_DOWN, i));
        ggml_set_name(layer.w3, tni(LLM_TENSOR_FFN_UP, i));
        ggml_set_name(layer.ffn_up, tni(LLM_TENSOR_FFN_UP, i));
    }

    set_param_model(model);

@ -244,9 +244,9 @@ static void randomize_model(struct my_llama_model * model, int seed, float mean,

        randomize_tensor_normal(layer.ffn_norm, rnd);

        randomize_tensor_normal(layer.w1, rnd);
        randomize_tensor_normal(layer.ffn_gate, rnd);
        randomize_tensor_normal(layer.w2, rnd);
        randomize_tensor_normal(layer.ffn_down, rnd);
        randomize_tensor_normal(layer.w3, rnd);
        randomize_tensor_normal(layer.ffn_up, rnd);
    }

    free_random_normal_distribution(rnd);

@ -356,11 +356,11 @@ static struct ggml_tensor * llama_build_train_graphs(
        struct ggml_tensor * t22 = ggml_rms_norm (ctx, t21, f_norm_rms_eps); set_name(t22, "t22"); assert_shape_2d(t22, n_embd, N*n_batch);
        struct ggml_tensor * t23 = ggml_repeat (ctx, layer.ffn_norm, t22); set_name(t23, "t23"); assert_shape_2d(t23, n_embd, N*n_batch);
        struct ggml_tensor * t24 = ggml_mul (ctx, t23, t22); set_name(t24, "t24"); assert_shape_2d(t24, n_embd, N*n_batch);
        struct ggml_tensor * t25 = ggml_mul_mat (ctx, layer.w3, t24); set_name(t25, "t25"); assert_shape_2d(t25, n_ff, N*n_batch);
        struct ggml_tensor * t25 = ggml_mul_mat (ctx, layer.ffn_up, t24); set_name(t25, "t25"); assert_shape_2d(t25, n_ff, N*n_batch);
        struct ggml_tensor * t26 = ggml_mul_mat (ctx, layer.w1, t24); set_name(t26, "t26"); assert_shape_2d(t26, n_ff, N*n_batch);
        struct ggml_tensor * t26 = ggml_mul_mat (ctx, layer.ffn_gate, t24); set_name(t26, "t26"); assert_shape_2d(t26, n_ff, N*n_batch);
        struct ggml_tensor * t27 = ggml_silu (ctx, t26); set_name(t27, "t27"); assert_shape_2d(t27, n_ff, N*n_batch);
        struct ggml_tensor * t28 = ggml_mul (ctx, t27, t25); set_name(t28, "t28"); assert_shape_2d(t28, n_ff, N*n_batch);
        struct ggml_tensor * t29 = ggml_mul_mat (ctx, layer.w2, t28); set_name(t29, "t29"); assert_shape_2d(t29, n_embd, N*n_batch);
        struct ggml_tensor * t29 = ggml_mul_mat (ctx, layer.ffn_down, t28); set_name(t29, "t29"); assert_shape_2d(t29, n_embd, N*n_batch);
        struct ggml_tensor * t30 = ggml_add (ctx, t29, t21); set_name(t30, "t30"); assert_shape_2d(t30, n_embd, N*n_batch);
        cur = t30;
        checkpoints.push_back(cur);

@ -521,9 +521,9 @@ static void load_llama_model_gguf(struct gguf_context * fctx, struct ggml_contex
        copy_tensor_by_name(layer.wv, f_ggml_ctx, tni(LLM_TENSOR_ATTN_V, i));
        copy_tensor_by_name(layer.wo, f_ggml_ctx, tni(LLM_TENSOR_ATTN_OUT, i));
        copy_tensor_by_name(layer.ffn_norm, f_ggml_ctx, tni(LLM_TENSOR_FFN_NORM, i));
        copy_tensor_by_name(layer.w1, f_ggml_ctx, tni(LLM_TENSOR_FFN_GATE, i));
        copy_tensor_by_name(layer.ffn_gate, f_ggml_ctx, tni(LLM_TENSOR_FFN_GATE, i));
        copy_tensor_by_name(layer.w2, f_ggml_ctx, tni(LLM_TENSOR_FFN_DOWN, i));
        copy_tensor_by_name(layer.ffn_down, f_ggml_ctx, tni(LLM_TENSOR_FFN_DOWN, i));
        copy_tensor_by_name(layer.w3, f_ggml_ctx, tni(LLM_TENSOR_FFN_UP, i));
        copy_tensor_by_name(layer.ffn_up, f_ggml_ctx, tni(LLM_TENSOR_FFN_UP, i));
    }
}

@ -664,9 +664,9 @@ static void save_llama_model_gguf(struct gguf_context * fctx, const char * fn_vo
        gguf_add_tensor(fctx, layer.wv);
        gguf_add_tensor(fctx, layer.wo);
        gguf_add_tensor(fctx, layer.ffn_norm);
        gguf_add_tensor(fctx, layer.w1);
        gguf_add_tensor(fctx, layer.ffn_gate);
        gguf_add_tensor(fctx, layer.w2);
        gguf_add_tensor(fctx, layer.ffn_down);
        gguf_add_tensor(fctx, layer.w3);
        gguf_add_tensor(fctx, layer.ffn_up);
    }
}

@ -915,9 +915,9 @@ static int64_t get_parameter_count(struct my_llama_model* model) {
        nx += ggml_nelements(layer.wv);
        nx += ggml_nelements(layer.wo);
        nx += ggml_nelements(layer.ffn_norm);
        nx += ggml_nelements(layer.w1);
        nx += ggml_nelements(layer.ffn_gate);
        nx += ggml_nelements(layer.w2);
        nx += ggml_nelements(layer.ffn_down);
        nx += ggml_nelements(layer.w3);
        nx += ggml_nelements(layer.ffn_up);
    }
    return nx;
}

@ -219,6 +219,10 @@ GGML_CALL void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void *
    GGML_ASSERT(buf != NULL && "tensor buffer not set");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");

    if (!size) {
        return;
    }

    tensor->buffer->iface.set_tensor(buf, tensor, data, offset, size);
}

@ -229,6 +233,10 @@ GGML_CALL void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void *
    GGML_ASSERT(tensor->buffer != NULL && "tensor buffer not set");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");

    if (!size) {
        return;
    }

    tensor->buffer->iface.get_tensor(buf, tensor, data, offset, size);
}

@ -7943,6 +7943,7 @@ GGML_CALL void ggml_init_cublas() {
        if (cudaGetDeviceCount(&g_device_count) != cudaSuccess) {
            initialized = true;
            g_cublas_loaded = false;
            fprintf(stderr, "%s: no " GGML_CUDA_NAME " devices found, " GGML_CUDA_NAME " will be disabled\n", __func__);
            return;
        }

@ -707,9 +707,21 @@ static void ggml_vk_queue_cleanup(ggml_backend_vk_context * ctx, vk_queue& q) {
    q.cmd_buffer_idx = 0;
}

static vk_buffer ggml_vk_create_buffer(ggml_backend_vk_context * ctx, size_t size, vk::MemoryPropertyFlags req_flags) {
static uint32_t find_properties(const vk::PhysicalDeviceMemoryProperties* mem_props, vk::MemoryRequirements* mem_req, vk::MemoryPropertyFlags flags) {
    for (uint32_t i = 0; i < mem_props->memoryTypeCount; ++i) {
        vk::MemoryType memory_type = mem_props->memoryTypes[i];
        if ((mem_req->memoryTypeBits & ((uint64_t)1 << i)) &&
            (flags & memory_type.propertyFlags) == flags &&
            mem_props->memoryHeaps[memory_type.heapIndex].size >= mem_req->size) {
            return static_cast<int32_t>(i);
        }
    }
    return UINT32_MAX;
}

static vk_buffer ggml_vk_create_buffer(ggml_backend_vk_context * ctx, size_t size, vk::MemoryPropertyFlags req_flags, vk::MemoryPropertyFlags fallback_flags = vk::MemoryPropertyFlags(0)) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_vk_create_buffer(" << size << ", " << to_string(req_flags) << ")" << std::endl;
    std::cerr << "ggml_vk_create_buffer(" << size << ", " << to_string(req_flags) << ", " << to_string(fallback_flags) << ")" << std::endl;
#endif
    vk_buffer buf = std::make_shared<vk_buffer_struct>();

@ -736,15 +748,15 @@ static vk_buffer ggml_vk_create_buffer(ggml_backend_vk_context * ctx, size_t siz

    uint32_t memory_type_index = UINT32_MAX;

    for (uint32_t i = 0; i < mem_props.memoryTypeCount; ++i) {
    memory_type_index = find_properties(&mem_props, &mem_req, req_flags);
        vk::MemoryType memory_type = mem_props.memoryTypes[i];
    buf->memory_property_flags = req_flags;
        if ((mem_req.memoryTypeBits & ((uint64_t)1 << i)) && (req_flags & memory_type.propertyFlags) == req_flags && mem_props.memoryHeaps[memory_type.heapIndex].size >= mem_req.size) {
            memory_type_index = i;
    if (memory_type_index == UINT32_MAX && fallback_flags) {
            break;
        memory_type_index = find_properties(&mem_props, &mem_req, fallback_flags);
        }
        buf->memory_property_flags = fallback_flags;
    }

    if (memory_type_index >= mem_props.memoryTypeCount) {
    if (memory_type_index == UINT32_MAX) {
        ctx->device.lock()->device.destroyBuffer(buf->buffer);
        buf->size = 0;
        throw vk::OutOfDeviceMemoryError("No suitable memory type found");

@ -758,10 +770,9 @@ static vk_buffer ggml_vk_create_buffer(ggml_backend_vk_context * ctx, size_t siz
        buf->size = 0;
        throw e;
    }
    buf->memory_property_flags = req_flags;

    buf->ptr = nullptr;

    if (req_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
    if (buf->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible) {
        buf->ptr = ctx->device.lock()->device.mapMemory(buf->device_memory, 0, VK_WHOLE_SIZE);
    }

@ -778,9 +789,9 @@ static vk_buffer ggml_vk_create_buffer(ggml_backend_vk_context * ctx, size_t siz
    return buf;
}

static vk_buffer ggml_vk_create_buffer_check(ggml_backend_vk_context * ctx, size_t size, vk::MemoryPropertyFlags req_flags) {
static vk_buffer ggml_vk_create_buffer_check(ggml_backend_vk_context * ctx, size_t size, vk::MemoryPropertyFlags req_flags, vk::MemoryPropertyFlags fallback_flags = vk::MemoryPropertyFlags(0)) {
    try {
        return ggml_vk_create_buffer(ctx, size, req_flags);
        return ggml_vk_create_buffer(ctx, size, req_flags, fallback_flags);
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Memory allocation of size " << size << " failed." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;

@ -791,17 +802,17 @@ static vk_buffer ggml_vk_create_buffer_check(ggml_backend_vk_context * ctx, size
static vk_buffer ggml_vk_create_buffer_device(ggml_backend_vk_context * ctx, size_t size) {
    vk_buffer buf;
    try {
        buf = ggml_vk_create_buffer(ctx, size, vk::MemoryPropertyFlagBits::eDeviceLocal);
    } catch (const vk::SystemError& e) {
        if (ctx->device.lock()->uma) {
            // Fall back to host memory type
            buf = ggml_vk_create_buffer_check(ctx, size, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
            buf = ggml_vk_create_buffer(ctx, size, vk::MemoryPropertyFlagBits::eDeviceLocal, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
        } else {
            buf = ggml_vk_create_buffer(ctx, size, vk::MemoryPropertyFlagBits::eDeviceLocal);
        }
    } catch (const vk::SystemError& e) {
        std::cerr << "ggml_vulkan: Device memory allocation of size " << size << " failed." << std::endl;
        std::cerr << "ggml_vulkan: " << e.what() << std::endl;
        throw e;
    }
    }

    return buf;
}

@ -1080,7 +1091,7 @@ static void ggml_vk_print_gpu_info(size_t idx) {
    }
}

void ggml_vk_instance_init() {
static void ggml_vk_instance_init() {
    if (vk_instance_initialized) {
        return;
    }

@ -1139,7 +1150,7 @@ void ggml_vk_instance_init() {
    vk_instance_initialized = true;
}

void ggml_vk_init(ggml_backend_vk_context * ctx, size_t idx) {
static void ggml_vk_init(ggml_backend_vk_context * ctx, size_t idx) {
    GGML_ASSERT(idx < vk_instance.device_indices.size());
    size_t dev_num = vk_instance.device_indices[idx];
#ifdef GGML_VULKAN_DEBUG

@ -1422,7 +1433,9 @@ static void * ggml_vk_host_malloc(ggml_backend_vk_context * ctx, size_t size) {
#ifdef GGML_VULKAN_DEBUG
    std::cerr << "ggml_vk_host_malloc(" << size << ")" << std::endl;
#endif
    vk_buffer buf = ggml_vk_create_buffer(ctx, size, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached);
    vk_buffer buf = ggml_vk_create_buffer(ctx, size,
        vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
        vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);

    if(!(buf->memory_property_flags & vk::MemoryPropertyFlagBits::eHostVisible)) {
        fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory\n",

@ -1568,7 +1581,9 @@ static void deferred_memcpy(void * dst, const void * src, size_t size, std::vect
static void ggml_vk_ensure_sync_staging_buffer(ggml_backend_vk_context * ctx, size_t size) {
    if (ctx->sync_staging == nullptr || ctx->sync_staging->size < size) {
        ggml_vk_destroy_buffer(ctx->sync_staging);
        ctx->sync_staging = ggml_vk_create_buffer_check(ctx, size, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached);
        ctx->sync_staging = ggml_vk_create_buffer_check(ctx, size,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
    }
}

@ -4082,7 +4097,9 @@ static void ggml_vk_preallocate_buffers(ggml_backend_vk_context * ctx) {
    std::cerr << "ggml_vk_preallocate_buffers(qx_size: " << ctx->prealloc_size_qx << " qy_size: " << ctx->prealloc_size_qy << " x_size: " << ctx->prealloc_size_x << " y_size: " << ctx->prealloc_size_y << " split_k_size: " << ctx->prealloc_size_split_k << ")" << std::endl;
#endif
#if defined(GGML_VULKAN_RUN_TESTS)
    ctx->staging = ggml_vk_create_buffer_check(ctx, 100ul * 1024ul * 1024ul, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached);
    ctx->staging = ggml_vk_create_buffer_check(ctx, 100ul * 1024ul * 1024ul,
        vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
        vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
    ggml_vk_test_transfer(ctx, 8192 * 1000, false);
    ggml_vk_test_transfer(ctx, 8192 * 1000, true);

@ -4174,7 +4191,9 @@ static void ggml_vk_preallocate_buffers(ggml_backend_vk_context * ctx) {
        if (ctx->staging != nullptr) {
            ggml_vk_destroy_buffer(ctx->staging);
        }
        ctx->staging = ggml_vk_create_buffer_check(ctx, ctx->staging_size, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached);
        ctx->staging = ggml_vk_create_buffer_check(ctx, ctx->staging_size,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
    }
}

@ -4537,13 +4556,13 @@ static void ggml_vk_cleanup(ggml_backend_vk_context * ctx) {
    }
}

GGML_CALL int ggml_vk_get_device_count() {
GGML_CALL static int ggml_vk_get_device_count() {
    ggml_vk_instance_init();

    return vk_instance.device_indices.size();
}

GGML_CALL void ggml_vk_get_device_description(int device, char * description, size_t description_size) {
GGML_CALL static void ggml_vk_get_device_description(int device, char * description, size_t description_size) {
    ggml_vk_instance_init();

    std::vector<vk::PhysicalDevice> devices = vk_instance.instance.enumeratePhysicalDevices();

@ -4561,7 +4580,7 @@ void ggml_vk_init_cpu_assist() {

    std::cerr << "ggml_vulkan: Found " << ggml_vk_get_device_count() << " Vulkan devices:" << std::endl;

    for (size_t i = 0; i < ggml_vk_get_device_count(); i++) {
    for (int i = 0; i < ggml_vk_get_device_count(); i++) {
        ggml_vk_print_gpu_info(i);
    }
    // Initialize the first backend to make sure CPU matrix multiplications can be offloaded.

@ -5248,7 +5267,7 @@ GGML_CALL void ggml_backend_vk_get_device_description(int device, char * descrip
}

GGML_CALL void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total) {
    GGML_ASSERT(device < vk_instance.device_indices.size());
    GGML_ASSERT(device < (int) vk_instance.device_indices.size());

    vk::PhysicalDevice vkdev = vk_instance.instance.enumeratePhysicalDevices()[vk_instance.device_indices[device]];
80 ggml.c

@@ -1954,9 +1954,16 @@ struct ggml_numa_node {
 };

 struct ggml_numa_nodes {
+    enum ggml_numa_strategy numa_strategy;
     struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES];
     uint32_t n_nodes;
     uint32_t total_cpus; // hardware threads on system
+    uint32_t current_node; // node on which main process is execting
+#ifdef __linux__
+    cpu_set_t cpuset; // cpuset from numactl
+#else
+    uint32_t cpuset; // no NUMA support outside of Linux at this time. Use a portable datatype
+#endif
 };

 //
@@ -1990,7 +1997,22 @@ inline static void ggml_critical_section_end(void) {
     atomic_fetch_sub(&g_state_barrier, 1);
 }

-void ggml_numa_init(void) {
+#ifdef __linux__
+static cpu_set_t ggml_get_numa_affinity(void) {
+    cpu_set_t cpuset;
+    pthread_t thread;
+    thread = pthread_self();
+    CPU_ZERO(&cpuset);
+    pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
+    return cpuset;
+}
+#else
+static uint32_t ggml_get_numa_affinity(void) {
+    return 0; // no NUMA support
+}
+#endif
+
+void ggml_numa_init(enum ggml_numa_strategy numa_flag) {
     if (g_state.numa.n_nodes > 0) {
         fprintf(stderr, "ggml_numa_init: NUMA already initialized\n");

@@ -2002,6 +2024,13 @@ void ggml_numa_init(void) {
     char path[256];
     int rv;

+    // set numa scheme
+    g_state.numa.numa_strategy = numa_flag;
+
+    GGML_PRINT_DEBUG("numa strategy %u\n",g_state.numa.numa_strategy);
+
+    g_state.numa.cpuset = ggml_get_numa_affinity();
+
     // enumerate nodes
     while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) {
         rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes);
@@ -2020,11 +2049,17 @@ void ggml_numa_init(void) {

     GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus);

-    if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1) {
+    // figure out which node we're on
+    uint current_cpu;
+    int getcpu_ret = getcpu(&current_cpu, &g_state.numa.current_node);
+
+    if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1 || getcpu_ret != 0) {
         g_state.numa.n_nodes = 0;
         return;
     }

+    GGML_PRINT_DEBUG("found our process on numa node %u, CPU %u\n", g_state.numa.current_node, current_cpu);
+
     for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) {
         struct ggml_numa_node * node = &g_state.numa.nodes[n];
         GGML_PRINT_DEBUG("CPUs on node %u:", n);
@@ -16638,26 +16673,46 @@ typedef pthread_t ggml_thread_t;

 // Android's libc implementation "bionic" does not support setting affinity
 #if defined(__linux__) && !defined(__BIONIC__)
-static void set_numa_thread_affinity(int thread_n, int n_threads) {
+static void set_numa_thread_affinity(int thread_n) {
     if (!ggml_is_numa()) {
         return;
     }

-    // run thread on node_num thread_n / (threads per node)
-    const int node_num = thread_n / ((n_threads + g_state.numa.n_nodes - 1) / g_state.numa.n_nodes);
-    struct ggml_numa_node * node = &g_state.numa.nodes[node_num];
+    int node_num;
+    int rv;
     size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);

+    switch(g_state.numa.numa_strategy) {
+        case GGML_NUMA_STRATEGY_DISTRIBUTE:
+            // run thread on node_num thread_n / (threads per node)
+            node_num = thread_n % g_state.numa.n_nodes;
+            break;
+        case GGML_NUMA_STRATEGY_ISOLATE:
+            // run thread on current_node
+            node_num = g_state.numa.current_node;
+            break;
+        case GGML_NUMA_STRATEGY_NUMACTL:
+            // use the cpuset that numactl gave us
+            rv = pthread_setaffinity_np(pthread_self(), setsize, &g_state.numa.cpuset);
+            if (rv) {
+                fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv));
+            }
+            return;
+        default:
+            return;
+    }
+
+    struct ggml_numa_node * node = &g_state.numa.nodes[node_num];
+
     cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
     CPU_ZERO_S(setsize, cpus);
     for (size_t i = 0; i < node->n_cpus; ++i) {
         CPU_SET_S(node->cpus[i], setsize, cpus);
     }

-    int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
+    rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
     if (rv) {
-        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",
-            strerror(rv));
+        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
     }

     CPU_FREE(cpus);
@@ -16678,8 +16733,7 @@ static void clear_numa_thread_affinity(void) {

     int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
     if (rv) {
-        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",
-            strerror(rv));
+        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
     }

     CPU_FREE(cpus);
@@ -16687,7 +16741,7 @@ static void clear_numa_thread_affinity(void) {
 #else
 // TODO: Windows etc.
 // (the linux implementation may also work on BSD, someone should test)
-static void set_numa_thread_affinity(int thread_n, int n_threads) { UNUSED(thread_n); UNUSED(n_threads); }
+static void set_numa_thread_affinity(int thread_n) { UNUSED(thread_n); }
 static void clear_numa_thread_affinity(void) {}
 #endif

@@ -16987,7 +17041,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {

     const int n_threads = state->shared->n_threads;

-    set_numa_thread_affinity(state->ith, n_threads);
+    set_numa_thread_affinity(state->ith);

     int node_n = -1;
     int task_phase = GGML_TASK_FINALIZE;
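A note on the change above, for readers skimming the hunk: under the old code a worker thread was pinned to node thread_n / (threads per node), i.e. contiguous blocks of threads per node, while the new GGML_NUMA_STRATEGY_DISTRIBUTE case uses thread_n % n_nodes, i.e. round-robin across nodes. The standalone snippet below only illustrates the two mappings side by side; it is not part of this commit and the helper names are local to the example.

    #include <stdio.h>

    // Old vs. new thread -> NUMA node mapping for the "distribute" case.
    static int old_node_for_thread(int thread_n, int n_threads, int n_nodes) {
        return thread_n / ((n_threads + n_nodes - 1) / n_nodes);   // contiguous blocks
    }
    static int new_node_for_thread(int thread_n, int n_nodes) {
        return thread_n % n_nodes;                                 // round-robin
    }

    int main(void) {
        const int n_threads = 8, n_nodes = 2;
        for (int t = 0; t < n_threads; ++t) {
            printf("thread %d: old node %d, new node %d\n",
                   t, old_node_for_thread(t, n_threads, n_nodes), new_node_for_thread(t, n_nodes));
        }
        return 0; // e.g. thread 1 moves from node 0 to node 1
    }
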
12 ggml.h

@@ -665,6 +665,16 @@ extern "C" {
         void * wdata;
     };

+    // numa strategies
+    enum ggml_numa_strategy {
+        GGML_NUMA_STRATEGY_DISABLED   = 0,
+        GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
+        GGML_NUMA_STRATEGY_ISOLATE    = 2,
+        GGML_NUMA_STRATEGY_NUMACTL    = 3,
+        GGML_NUMA_STRATEGY_MIRROR     = 4,
+        GGML_NUMA_STRATEGY_COUNT
+    };
+
     // misc

     GGML_API void ggml_time_init(void); // call this once at the beginning of the program
@@ -675,7 +685,7 @@ extern "C" {

     GGML_API void ggml_print_backtrace(void);

-    GGML_API void ggml_numa_init(void); // call once for better performance on NUMA systems
+    GGML_API void ggml_numa_init(enum ggml_numa_strategy numa); // call once for better performance on NUMA systems
     GGML_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node

     GGML_API void ggml_print_object (const struct ggml_object * obj);
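With the header change above, callers now pass an explicit strategy to ggml_numa_init() instead of a boolean; note that GGML_NUMA_STRATEGY_MIRROR is declared here but appears to fall through to the default case of the affinity switch in ggml.c above. The sketch below is illustrative only and not part of this diff; it assumes a standalone program including the ggml.h from this commit.

    // Hypothetical usage sketch, not code from this commit.
    #include "ggml.h"
    #include <stdio.h>

    int main(void) {
        ggml_time_init();                               // per ggml.h: call once at program start
        ggml_numa_init(GGML_NUMA_STRATEGY_DISTRIBUTE);  // spread compute threads across all NUMA nodes
        if (ggml_is_numa()) {
            printf("more than one NUMA node detected; per-thread affinity will be applied\n");
        }
        return 0;
    }
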
45 gguf-py/examples/reader.py (new file)

@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+import sys
+from pathlib import Path
+from gguf.gguf_reader import GGUFReader
+
+
+sys.path.insert(0, str(Path(__file__).parent.parent))
+
+
+def read_gguf_file(gguf_file_path):
+    """
+    Reads and prints key-value pairs and tensor information from a GGUF file in an improved format.
+
+    Parameters:
+    - gguf_file_path: Path to the GGUF file.
+    """
+
+    reader = GGUFReader(gguf_file_path)
+
+    # List all key-value pairs in a columnized format
+    print("Key-Value Pairs:")
+    max_key_length = max(len(key) for key in reader.fields.keys())
+    for key, field in reader.fields.items():
+        value = field.parts[field.data[0]]
+        print(f"{key:{max_key_length}} : {value}")
+    print("----")
+
+    # List all tensors
+    print("Tensors:")
+    tensor_info_format = "{:<30} | Shape: {:<15} | Size: {:<12} | Quantization: {}"
+    print(tensor_info_format.format("Tensor Name", "Shape", "Size", "Quantization"))
+    print("-" * 80)
+    for tensor in reader.tensors:
+        shape_str = "x".join(map(str, tensor.shape))
+        size_str = str(tensor.n_elements)
+        quantization_str = tensor.tensor_type.name
+        print(tensor_info_format.format(tensor.name, shape_str, size_str, quantization_str))
+
+
+if __name__ == '__main__':
+    if len(sys.argv) < 2:
+        print("Usage: reader.py <path_to_gguf_file>")
+        sys.exit(1)
+    gguf_file_path = sys.argv[1]
+    read_gguf_file(gguf_file_path)
@@ -40,6 +40,7 @@ class Keys:
         TENSOR_DATA_LAYOUT = "{arch}.tensor_data_layout"
         EXPERT_COUNT       = "{arch}.expert_count"
         EXPERT_USED_COUNT  = "{arch}.expert_used_count"
+        POOLING_TYPE       = "{arch}.pooling_type"

     class Attention:
         HEAD_COUNT = "{arch}.attention.head_count"
@@ -72,6 +73,8 @@ class Keys:
         UNK_ID     = "tokenizer.ggml.unknown_token_id"
         SEP_ID     = "tokenizer.ggml.seperator_token_id"
         PAD_ID     = "tokenizer.ggml.padding_token_id"
+        CLS_ID     = "tokenizer.ggml.cls_token_id"
+        MASK_ID    = "tokenizer.ggml.mask_token_id"
         ADD_BOS    = "tokenizer.ggml.add_bos_token"
         ADD_EOS    = "tokenizer.ggml.add_eos_token"
         ADD_PREFIX = "tokenizer.ggml.add_space_prefix"
@@ -97,6 +100,7 @@ class MODEL_ARCH(IntEnum):
     PERSIMMON  = auto()
     REFACT     = auto()
     BERT       = auto()
+    NOMIC_BERT = auto()
     BLOOM      = auto()
     STABLELM   = auto()
     QWEN       = auto()
@@ -152,6 +156,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
    MODEL_ARCH.PERSIMMON:  "persimmon",
    MODEL_ARCH.REFACT:     "refact",
    MODEL_ARCH.BERT:       "bert",
+   MODEL_ARCH.NOMIC_BERT: "nomic-bert",
    MODEL_ARCH.BLOOM:      "bloom",
    MODEL_ARCH.STABLELM:   "stablelm",
    MODEL_ARCH.QWEN:       "qwen",
@@ -281,6 +286,20 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
        MODEL_TENSOR.FFN_UP,
        MODEL_TENSOR.LAYER_OUT_NORM,
    ],
+   MODEL_ARCH.NOMIC_BERT: [
+       MODEL_TENSOR.TOKEN_EMBD,
+       MODEL_TENSOR.TOKEN_EMBD_NORM,
+       MODEL_TENSOR.TOKEN_TYPES,
+       MODEL_TENSOR.POS_EMBD,
+       MODEL_TENSOR.OUTPUT_NORM,
+       MODEL_TENSOR.ATTN_OUT_NORM,
+       MODEL_TENSOR.ATTN_QKV,
+       MODEL_TENSOR.ATTN_OUT,
+       MODEL_TENSOR.FFN_GATE,
+       MODEL_TENSOR.FFN_DOWN,
+       MODEL_TENSOR.FFN_UP,
+       MODEL_TENSOR.LAYER_OUT_NORM,
+   ],
    MODEL_ARCH.MPT: [
        MODEL_TENSOR.TOKEN_EMBD,
        MODEL_TENSOR.OUTPUT_NORM,
@@ -542,6 +561,12 @@ class RopeScalingType(Enum):
     YARN = 'yarn'


+class PoolingType(IntEnum):
+    NONE = 0
+    MEAN = 1
+    CLS  = 2
+
+
 class GGMLQuantizationType(IntEnum):
     F32 = 0
     F16 = 1
@@ -668,5 +693,7 @@ KEY_TOKENIZER_EOS_ID = Keys.Tokenizer.EOS_ID
 KEY_TOKENIZER_UNK_ID     = Keys.Tokenizer.UNK_ID
 KEY_TOKENIZER_SEP_ID     = Keys.Tokenizer.SEP_ID
 KEY_TOKENIZER_PAD_ID     = Keys.Tokenizer.PAD_ID
+KEY_TOKENIZER_CLS_ID     = Keys.Tokenizer.CLS_ID
+KEY_TOKENIZER_MASK_ID    = Keys.Tokenizer.MASK_ID
 KEY_TOKENIZER_HF_JSON    = Keys.Tokenizer.HF_JSON
 KEY_TOKENIZER_RWKV       = Keys.Tokenizer.RWKV
@@ -19,6 +19,7 @@ from .constants import (
     GGUFValueType,
     Keys,
     RopeScalingType,
+    PoolingType,
     TokenType,
 )

@@ -360,6 +361,9 @@ class GGUFWriter:
     def add_causal_attention(self, value: bool) -> None:
         self.add_bool(Keys.Attention.CAUSAL.format(arch=self.arch), value)

+    def add_pooling_type(self, value: PoolingType) -> None:
+        self.add_uint32(Keys.LLM.POOLING_TYPE.format(arch=self.arch), value)
+
     def add_rope_dimension_count(self, count: int) -> None:
         self.add_uint32(Keys.Rope.DIMENSION_COUNT.format(arch=self.arch), count)

@@ -411,6 +415,12 @@ class GGUFWriter:
     def add_pad_token_id(self, id: int) -> None:
         self.add_uint32(Keys.Tokenizer.PAD_ID, id)

+    def add_cls_token_id(self, id: int) -> None:
+        self.add_uint32(Keys.Tokenizer.CLS_ID, id)
+
+    def add_mask_token_id(self, id: int) -> None:
+        self.add_uint32(Keys.Tokenizer.MASK_ID, id)
+
     def add_add_bos_token(self, value: bool) -> None:
         self.add_bool(Keys.Tokenizer.ADD_BOS, value)

@@ -15,7 +15,7 @@ class TensorNameMap:
            "word_embeddings",                           # bloom
            "model.embed_tokens",                        # llama-hf
            "tok_embeddings",                            # llama-pth
-           "embeddings.word_embeddings",                # bert
+           "embeddings.word_embeddings",                # bert nomic-bert
            "language_model.embedding.word_embeddings",  # persimmon
            "wte",                                       # gpt2
            "transformer.embd.wte",                      # phi2
@@ -24,13 +24,14 @@ class TensorNameMap:

        # Token type embeddings
        MODEL_TENSOR.TOKEN_TYPES: (
-           "embeddings.token_type_embeddings",  # bert
+           "embeddings.token_type_embeddings",  # bert nomic-bert
        ),

        # Normalization of token embeddings
        MODEL_TENSOR.TOKEN_EMBD_NORM: (
            "word_embeddings_layernorm",  # bloom
            "embeddings.LayerNorm",       # bert
+           "emb_ln",                     # nomic-bert
        ),

        # Position embeddings
@@ -103,6 +104,7 @@ class TensorNameMap:
            "model.layers.{bid}.self_attn.query_key_value",  # persimmon
            "h.{bid}.attn.c_attn",                           # gpt2
            "transformer.h.{bid}.mixer.Wqkv",                # phi2
+           "encoder.layers.{bid}.attn.Wqkv",                # nomic-bert
        ),

        # Attention query
@@ -152,11 +154,13 @@ class TensorNameMap:
            "transformer.h.{bid}.mixer.out_proj",          # phi2
            "model.layers.layers.{bid}.self_attn.o_proj",  # plamo
            "model.layers.{bid}.attention.wo",             # internlm2
+           "encoder.layers.{bid}.attn.out_proj",          # nomic-bert
        ),

        # Attention output norm
        MODEL_TENSOR.ATTN_OUT_NORM: (
            "encoder.layer.{bid}.attention.output.LayerNorm",  # bert
+           "encoder.layers.{bid}.norm1",                      # nomic-bert
        ),

        # Rotary embeddings
@@ -205,6 +209,7 @@ class TensorNameMap:
            "model.layers.{bid}.mlp.fc1",             # phi2
            "model.layers.layers.{bid}.mlp.up_proj",  # plamo
            "model.layers.{bid}.feed_forward.w3",     # internlm2
+           "encoder.layers.{bid}.mlp.fc11",          # nomic-bert
        ),

        MODEL_TENSOR.FFN_UP_EXP: (
@@ -224,6 +229,7 @@ class TensorNameMap:
            "transformer.h.{bid}.mlp.w2",               # qwen
            "model.layers.layers.{bid}.mlp.gate_proj",  # plamo
            "model.layers.{bid}.feed_forward.w1",       # internlm2
+           "encoder.layers.{bid}.mlp.fc12",            # nomic-bert
        ),

        MODEL_TENSOR.FFN_GATE_EXP: (
@@ -249,6 +255,7 @@ class TensorNameMap:
            "model.layers.{bid}.mlp.fc2",               # phi2
            "model.layers.layers.{bid}.mlp.down_proj",  # plamo
            "model.layers.{bid}.feed_forward.w2",       # internlm2
+           "encoder.layers.{bid}.mlp.fc2",             # nomic-bert
        ),

        MODEL_TENSOR.FFN_DOWN_EXP: (
@@ -272,6 +279,7 @@ class TensorNameMap:

        MODEL_TENSOR.LAYER_OUT_NORM: (
            "encoder.layer.{bid}.output.LayerNorm",  # bert
+           "encoder.layers.{bid}.norm2",            # nomic-bert
        )
    }

@@ -29,7 +29,7 @@ class SpecialVocab:
         if special_token_types is not None:
             self.special_token_types = special_token_types
         else:
-            self.special_token_types = ('bos', 'eos', 'unk', 'sep', 'pad')
+            self.special_token_types = ('bos', 'eos', 'unk', 'sep', 'pad', 'cls', 'mask')
         self._load(Path(path))

     def __repr__(self) -> str:
@@ -152,10 +152,6 @@ class SpecialVocab:
             add_entry = tokenizer_config.get(f'add_{typ}_token')
             if isinstance(add_entry, bool):
                 self.add_special_token[typ] = add_entry
-            if not added_tokens:
-                # We will need this to get the content for the token, so if it's empty
-                # may as well just give up.
-                continue
             entry = tokenizer_config.get(f'{typ}_token')
             if isinstance(entry, str):
                 tc_content = entry
@@ -921,7 +921,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
     }
     else if(file_format==FileFormat::GGUF_GENERIC)
     {
-        llama_backend_init(false);
+        llama_backend_init();

         llama_model_params model_params = llama_model_default_params();
         llama_context_params llama_ctx_params = llama_context_default_params();
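The call above reflects the API split visible at the end of the llama.cpp diff below: llama_backend_init() no longer takes a NUMA flag, and NUMA setup moves to a separate llama_numa_init(enum ggml_numa_strategy). A minimal sketch of the new initialization order, for illustration only (the wrapper function is hypothetical and assumes the llama.h from this commit):

    // Illustrative only; mirrors the split shown in the llama.cpp hunks below.
    #include "llama.h"

    void init_backend_with_numa(enum ggml_numa_strategy numa) {
        llama_backend_init();          // no longer takes a bool numa flag
        if (numa != GGML_NUMA_STRATEGY_DISABLED) {
            llama_numa_init(numa);     // new: pick distribute / isolate / numactl explicitly
        }
    }
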
305 llama.cpp

@@ -221,6 +221,7 @@ enum llm_arch {
     LLM_ARCH_PERSIMMON,
     LLM_ARCH_REFACT,
     LLM_ARCH_BERT,
+    LLM_ARCH_NOMIC_BERT,
     LLM_ARCH_BLOOM,
     LLM_ARCH_STABLELM,
     LLM_ARCH_QWEN,
@@ -246,6 +247,7 @@ static std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_PERSIMMON,       "persimmon"  },
     { LLM_ARCH_REFACT,          "refact"     },
     { LLM_ARCH_BERT,            "bert"       },
+    { LLM_ARCH_NOMIC_BERT,      "nomic-bert" },
     { LLM_ARCH_BLOOM,           "bloom"      },
     { LLM_ARCH_STABLELM,        "stablelm"   },
     { LLM_ARCH_QWEN,            "qwen"       },
@@ -278,6 +280,7 @@ enum llm_kv {
     LLM_KV_TENSOR_DATA_LAYOUT,
     LLM_KV_EXPERT_COUNT,
     LLM_KV_EXPERT_USED_COUNT,
+    LLM_KV_POOLING_TYPE,

     LLM_KV_ATTENTION_HEAD_COUNT,
     LLM_KV_ATTENTION_HEAD_COUNT_KV,
@@ -335,6 +338,7 @@ static std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_TENSOR_DATA_LAYOUT,  "%s.tensor_data_layout" },
     { LLM_KV_EXPERT_COUNT,        "%s.expert_count"       },
     { LLM_KV_EXPERT_USED_COUNT,   "%s.expert_used_count"  },
+    { LLM_KV_POOLING_TYPE ,       "%s.pooling_type"       },

     { LLM_KV_ATTENTION_HEAD_COUNT,    "%s.attention.head_count"    },
     { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" },
@@ -397,6 +401,7 @@ enum llm_tensor {
     LLM_TENSOR_ATTN_OUT,
     LLM_TENSOR_ATTN_NORM,
     LLM_TENSOR_ATTN_NORM_2,
+    LLM_TENSOR_ATTN_OUT_NORM,
     LLM_TENSOR_ATTN_ROT_EMBD,
     LLM_TENSOR_FFN_GATE_INP,
     LLM_TENSOR_FFN_NORM,
@@ -409,6 +414,7 @@ enum llm_tensor {
     LLM_TENSOR_FFN_UP_EXP,
     LLM_TENSOR_ATTN_Q_NORM,
     LLM_TENSOR_ATTN_K_NORM,
+    LLM_TENSOR_LAYER_OUT_NORM,
 };

 static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
@@ -574,12 +580,27 @@ static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES =
             { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
             { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
             { LLM_TENSOR_POS_EMBD,        "position_embd" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_output_norm" },
+            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
             { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
             { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
             { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
             { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.layer_output_norm" },
+            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
+            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+        },
+    },
+    {
+        LLM_ARCH_NOMIC_BERT,
+        {
+            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
+            { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
+            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
+            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
+            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
+            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
             { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
             { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
         },
@@ -1511,6 +1532,7 @@ enum e_model {
     MODEL_22M,
     MODEL_33M,
     MODEL_109M,
+    MODEL_137M,
     MODEL_335M,
     MODEL_0_5B,
     MODEL_1B,
@@ -1567,6 +1589,7 @@ struct llama_hparams {
     float f_max_alibi_bias;

     bool causal_attn = true;
+    uint32_t pooling_type = LLAMA_POOLING_NONE;


     bool operator!=(const llama_hparams & other) const {
@@ -1629,6 +1652,7 @@ struct llama_cparams {

     bool mul_mat_q;
     bool offload_kqv;
+    bool do_pooling;

     ggml_backend_sched_eval_callback cb_eval;
     void * cb_eval_user_data;
@@ -1644,6 +1668,8 @@ struct llama_layer {
     struct ggml_tensor * attn_q_norm_b;
     struct ggml_tensor * attn_k_norm;
     struct ggml_tensor * attn_k_norm_b;
+    struct ggml_tensor * attn_out_norm;
+    struct ggml_tensor * attn_out_norm_b;

     // attention
     struct ggml_tensor * wq;
@@ -1662,6 +1688,8 @@ struct llama_layer {
     // normalization
     struct ggml_tensor * ffn_norm;
     struct ggml_tensor * ffn_norm_b;
+    struct ggml_tensor * layer_out_norm;
+    struct ggml_tensor * layer_out_norm_b;

     // ff
     struct ggml_tensor * ffn_gate; // w1
@@ -1928,7 +1956,8 @@ struct llama_context {
     struct ggml_tensor * inp_pos;     // I32 [n_batch]
     struct ggml_tensor * inp_KQ_mask; // F32 [n_ctx, n_batch]
     struct ggml_tensor * inp_K_shift; // I32 [n_ctx]
-    struct ggml_tensor * inp_sum;     // F32 [1, n_batch]
+    struct ggml_tensor * inp_mean;    // F32 [n_batch, n_batch]
+    struct ggml_tensor * inp_cls;     // I32 [n_batch]

 #ifdef GGML_USE_MPI
     ggml_mpi_context * ctx_mpi = NULL;
@@ -2897,6 +2926,11 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {

 static const char * llama_model_type_name(e_model type) {
     switch (type) {
+        case MODEL_22M:   return "22M";
+        case MODEL_33M:   return "33M";
+        case MODEL_109M:  return "109M";
+        case MODEL_137M:  return "137M";
+        case MODEL_0_5B:  return "0.5B";
         case MODEL_1B:    return "1B";
         case MODEL_2B:    return "2B";
         case MODEL_3B:    return "3B";
@@ -3099,6 +3133,7 @@ static void llm_load_hparams(
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
                 ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
                 ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
+                ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type);

                 switch (hparams.n_layer) {
                     case 3:
@@ -3114,6 +3149,17 @@ static void llm_load_hparams(
                         model.type = e_model::MODEL_335M; break; // bge-large
                 }
             } break;
+        case LLM_ARCH_NOMIC_BERT:
+            {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
+                ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
+                ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type);
+
+                if (hparams.n_layer == 12 && hparams.n_embd == 768) {
+                    model.type = e_model::MODEL_137M;
+                }
+            } break;
         case LLM_ARCH_BLOOM:
             {
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
@@ -3374,7 +3420,12 @@ static void llm_load_vocab(

     // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
     if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
+        try {
             vocab.linefeed_id = llama_byte_to_token(vocab, '\n');
+        } catch (const std::exception & e) {
+            LLAMA_LOG_WARN("%s: SPM vocabulary, but newline token not found: %s! Using special_pad_id instead.", __func__, e.what());
+            vocab.linefeed_id = vocab.special_pad_id;
+        }
     } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
         vocab.linefeed_id = vocab.special_pad_id;
     } else {
@@ -3937,10 +3988,14 @@ static bool llm_load_tensors(
                     }
                 } break;
             case LLM_ARCH_BERT:
+            case LLM_ARCH_NOMIC_BERT:
                 {
                     model.tok_embd  = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab});
                     model.type_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_vocab_type});
+                    if (model.arch == LLM_ARCH_BERT) {
                         model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train});
+                    }

                     model.tok_norm   = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd});
                     model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {n_embd});

@@ -3950,12 +4005,7 @@ static bool llm_load_tensors(

                         auto & layer = model.layers[i];

-                        layer.attn_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
-                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd});
-
-                        layer.ffn_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
-                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd});
-
+                        if (model.arch == LLM_ARCH_BERT) {
                             layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                             layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i),   {n_embd});

@@ -3964,15 +4014,29 @@ static bool llm_load_tensors(

                             layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                             layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i),   {n_embd_gqa});
+                        } else {
+                            layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
+                        }
+
                         layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
-                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd});
+
+                        layer.attn_out_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd});
+                        layer.attn_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i),   {n_embd});

                         layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff});
+                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
+
+                        if (model.arch == LLM_ARCH_BERT) {
+                            layer.bo       = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
                             layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff});
-
-                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
                             layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
+                        } else {
+                            layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
+                        }
+
+                        layer.layer_out_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd});
+                        layer.layer_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i),   {n_embd});
                     }
                 } break;
             case LLM_ARCH_BLOOM:
@@ -4451,9 +4515,21 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam

         model.hparams.vocab_only = params.vocab_only;

-        llm_load_arch   (ml, model);
+        try {
+            llm_load_arch(ml, model);
+        } catch(const std::exception & e) {
+            throw std::runtime_error("error loading model architecture: " + std::string(e.what()));
+        }
+        try {
             llm_load_hparams(ml, model);
-        llm_load_vocab  (ml, model);
+        } catch(const std::exception & e) {
+            throw std::runtime_error("error loading model hyperparameters: " + std::string(e.what()));
+        }
+        try {
+            llm_load_vocab(ml, model);
+        } catch(const std::exception & e) {
+            throw std::runtime_error("error loading model vocabulary: " + std::string(e.what()));
+        }

         llm_load_print_meta(ml, model);

@@ -4931,7 +5007,7 @@ struct llm_build_context {
     const int32_t n_orig_ctx;

     const bool do_rope_shift;
-    const bool causal_attn;
+    const uint32_t pooling_type;

     const llm_build_cb & cb;

@@ -4975,7 +5051,7 @@ struct llm_build_context {
         kv_head          (worst_case ? n_ctx - n_tokens : kv_self.head),
         n_orig_ctx       (cparams.n_yarn_orig_ctx),
         do_rope_shift    (worst_case || kv_self.has_shift),
-        causal_attn      (hparams.causal_attn),
+        pooling_type     (cparams.do_pooling ? hparams.pooling_type : (uint32_t)LLAMA_POOLING_NONE),
         cb               (cb),
         buf_compute_meta (lctx.buf_compute_meta) {
             // all initializations should be done in init()
@@ -5823,22 +5899,27 @@ struct llm_build_context {
         struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

         const int64_t n_embd_head = hparams.n_embd_head_v;
+        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
-        GGML_ASSERT(n_embd_head == hparams.n_rot);

         struct ggml_tensor * cur;
         struct ggml_tensor * inpL;

         // get input vectors with right size
+        const size_t stride1 = n_tokens * ggml_type_size(lctx.inp_tokens->type);
         struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
-        struct ggml_tensor * inp_sum = ggml_view_1d(ctx0, lctx.inp_sum, n_tokens, 0);
+        struct ggml_tensor * inp_mean = ggml_view_2d(ctx0, lctx.inp_mean, n_tokens, n_tokens, stride1, 0);
+        struct ggml_tensor * inp_cls = ggml_view_1d(ctx0, lctx.inp_cls, n_tokens, 0);

         // construct input embeddings (token, type, position)
         inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);

         // token types are hardcoded to zero ("Sentence A")
         struct ggml_tensor * type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0);
         inpL = ggml_add(ctx0, inpL, type_row0);
+        if (model.arch == LLM_ARCH_BERT) {
             inpL = ggml_add(ctx0, ggml_get_rows(ctx0, model.pos_embd, inp_pos), inpL);
+        }
         cb(inpL, "inp_embd", -1);

         // embed layer norm
@@ -5854,7 +5935,7 @@ struct llm_build_context {
             struct ggml_tensor * cur = inpL;

             // self-attention
-            {
+            if (model.arch == LLM_ARCH_BERT) {
                 struct ggml_tensor * Qcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, cur), model.layers[il].bq);
                 cb(Qcur, "Qcur", il);

@@ -5867,6 +5948,37 @@ struct llm_build_context {
                 // seems like we just need to do this for Q?
                 Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+
+                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
+                        model.layers[il].wo, model.layers[il].bo,
+                        Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                cb(cur, "kqv_out", il);
+            } else {
+                // compute Q and K and RoPE them
+                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
+                cb(cur, "wqkv", il);
+
+                struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
+                struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
+                struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
+
+                cb(Qcur, "Qcur", il);
+                cb(Kcur, "Kcur", il);
+                cb(Vcur, "Vcur", il);
+
+                Qcur = ggml_rope_custom(
+                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
+                    hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                );
+                cb(Qcur, "Qcur", il);
+
+                Kcur = ggml_rope_custom(
+                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
+                    hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                );
+                cb(Kcur, "Kcur", il);
+
                 cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                         model.layers[il].wo, model.layers[il].bo,
                         Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
@@ -5877,25 +5989,34 @@ struct llm_build_context {
             cur = ggml_add(ctx0, cur, inpL);

             // attention layer norm
-            cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_norm, model.layers[il].attn_norm_b, LLM_NORM, cb, il);
+            cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_out_norm, model.layers[il].attn_out_norm_b, LLM_NORM, cb, il);

             struct ggml_tensor * ffn_inp = cur;
             cb(ffn_inp, "ffn_inp", il);

             // feed-forward network
+            if (model.arch == LLM_ARCH_BERT) {
                 cur = llm_build_ffn(ctx0, cur,
                         model.layers[il].ffn_up,   model.layers[il].ffn_up_b,
                         NULL,                      NULL,
                         model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                         NULL,
                         LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
+            } else {
+                cur = llm_build_ffn(ctx0, cur,
+                        model.layers[il].ffn_up,   NULL,
+                        model.layers[il].ffn_gate, NULL,
+                        model.layers[il].ffn_down, NULL,
+                        NULL,
+                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
+            }
             cb(cur, "ffn_out", il);

             // attentions bypass the intermediate layer
             cur = ggml_add(ctx0, cur, ffn_inp);

             // output layer norm
-            cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].ffn_norm, model.layers[il].ffn_norm_b, LLM_NORM, cb, il);
+            cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].layer_out_norm, model.layers[il].layer_out_norm_b, LLM_NORM, cb, il);

             // input for next layer
             inpL = cur;
@@ -5904,9 +6025,15 @@ struct llm_build_context {
         // final output
         cur = inpL;

-        // pooling
-        cur = ggml_mul_mat(ctx0, inp_sum, ggml_cont(ctx0, ggml_transpose(ctx0, cur)));
-        cb(cur, "result_embed", -1);
+        // pooling layer
+        if (pooling_type == LLAMA_POOLING_MEAN) {
+            cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, cur)), inp_mean);
+        } else if (pooling_type == LLAMA_POOLING_CLS) {
+            cur = ggml_get_rows(ctx0, cur, inp_cls);
+        } else {
+            GGML_ASSERT(pooling_type == LLAMA_POOLING_NONE && "Invalid pooling type");
+        }
+        cb(cur, "result_embd", -1);

         ggml_build_forward_expand(gf, cur);

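A short aside on what the new pooling branch computes, as a sketch rather than a statement of the implementation: with LLAMA_POOLING_MEAN, inp_mean is an n_tokens x n_tokens matrix holding 1/len(seq) at entries pairing each token with its sequence, so the matrix multiply averages the per-token embeddings of each sequence; with LLAMA_POOLING_CLS, ggml_get_rows picks the embedding of each sequence's position-0 token. The plain C snippet below reproduces the mean-pooling arithmetic on the CPU for a toy batch; all names in it are local to the example and it is not code from this commit.

    #include <stdio.h>

    // Toy mean pooling: 5 tokens, two sequences (0,0,0,1,1), n_embd = 2.
    int main(void) {
        const int   n_tokens = 5, n_embd = 2;
        const int   seq_id[5]  = {0, 0, 0, 1, 1};
        const float embd[5][2] = {{1,2},{3,4},{5,6},{10,0},{20,0}};

        float pooled[2][2] = {{0}};
        int   count[2]     = {0};

        for (int i = 0; i < n_tokens; ++i) {
            count[seq_id[i]]++;
            for (int d = 0; d < n_embd; ++d) {
                pooled[seq_id[i]][d] += embd[i][d];
            }
        }
        for (int s = 0; s < 2; ++s) {
            for (int d = 0; d < n_embd; ++d) {
                pooled[s][d] /= (float) count[s];   // same effect as multiplying by inp_mean
            }
            printf("seq %d: (%.1f, %.1f)\n", s, pooled[s][0], pooled[s][1]);
            // seq 0: (3.0, 4.0)   seq 1: (15.0, 0.0)
        }
        return 0;
    }
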
@@ -7336,6 +7463,7 @@ static struct ggml_cgraph * llama_build_graph(
                 result = llm.build_refact();
             } break;
         case LLM_ARCH_BERT:
+        case LLM_ARCH_NOMIC_BERT:
             {
                 result = llm.build_bert();
             } break;
@@ -7439,7 +7567,8 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {

                 for (int i = 0; i < n_kv; ++i) {
                     float f;
-                    if (!lctx.kv_self.cells[i].has_seq_id(seq_id) || lctx.kv_self.cells[i].pos > pos) {
+                    if (!lctx.kv_self.cells[i].has_seq_id(seq_id) ||
+                        (hparams.causal_attn && lctx.kv_self.cells[i].pos > pos)) {
                         f = -INFINITY;
                     } else {
                         f = 0;
@@ -7450,16 +7579,6 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
             }
         }

-    {
-        assert(ggml_backend_buffer_is_host(lctx.inp_sum->buffer));
-        float * data = (float *) lctx.inp_sum->data;
-
-        for (int i = 0; i < batch.n_tokens; ++i) {
-            data[i] = 1.0f/float(batch.n_tokens);
-        }
-    }
-
     if (kv_self.has_shift) {
         const int64_t n_ctx = cparams.n_ctx;

@@ -7471,6 +7590,49 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
             data[i] = lctx.kv_self.cells[i].delta;
         }
     }
+
+    if (cparams.do_pooling && hparams.pooling_type == LLAMA_POOLING_MEAN) {
+        const int64_t n_tokens = batch.n_tokens;
+
+        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer));
+        float * data = (float *) lctx.inp_mean->data;
+
+        memset(lctx.inp_mean->data, 0, n_tokens * n_tokens * ggml_element_size(lctx.inp_mean));
+
+        std::vector<uint64_t> sum(n_tokens, 0);
+        for (int i = 0; i < n_tokens; ++i) {
+            const llama_seq_id seq_id = batch.seq_id[i][0];
+            sum[seq_id] += 1;
+        }
+
+        std::vector<float> div(n_tokens, 0.0f);
+        for (int i = 0; i < n_tokens; ++i) {
+            const uint64_t s = sum[i];
+            if (s > 0) {
+                div[i] = 1.0f/float(s);
+            }
+        }
+
+        for (int i = 0; i < n_tokens; ++i) {
+            const llama_seq_id seq_id = batch.seq_id[i][0];
+            data[seq_id*n_tokens + i] = div[seq_id];
+        }
+    }
+
+    if (cparams.do_pooling && hparams.pooling_type == LLAMA_POOLING_CLS) {
+        const int64_t n_tokens = batch.n_tokens;
+
+        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer));
+        uint32_t * data = (uint32_t *) lctx.inp_cls->data;
+
+        for (int i = 0; i < n_tokens; ++i) {
+            const llama_seq_id seq_id = batch.seq_id[i][0];
+            const llama_pos pos = batch.pos[i];
+            if (pos == 0) {
+                data[seq_id] = i;
+            }
+        }
+    }
 }

 // decode a batch of tokens by evaluating the transformer
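The KQ_mask change in the hunk above is what lets the BERT-style encoders attend bidirectionally: masking of later positions is now applied only when hparams.causal_attn is set, while tokens belonging to other sequences remain masked either way. A toy sketch of the resulting mask for a single 3-token sequence follows; it is illustrative only and not code from this diff.

    #include <stdio.h>
    #include <math.h>

    // Print the attention mask for one 3-token sequence: rows = query position,
    // columns = key position; 0 = attend, -inf = masked.
    static void print_mask(int causal) {
        const int n = 3;
        printf("%s attention:\n", causal ? "causal" : "bidirectional");
        for (int q = 0; q < n; ++q) {
            for (int k = 0; k < n; ++k) {
                float f = (causal && k > q) ? -INFINITY : 0.0f;
                printf("%8.1f", f);
            }
            printf("\n");
        }
    }

    int main(void) {
        print_mask(1);  // decoder-style (LLaMA and friends)
        print_mask(0);  // encoder-style (bert / nomic-bert embeddings)
        return 0;
    }
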
@@ -7582,7 +7744,7 @@ static int llama_decode_internal(
                 embeddings = gf->nodes[gf->n_nodes - 3];
                 GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
             }
-        } else if (strcmp(res->name, "result_embed") == 0) {
+        } else if (strcmp(res->name, "result_embd") == 0) {
             embeddings = res;
             res = nullptr;
         } else {
@@ -7705,11 +7867,12 @@ static int llama_decode_internal(
     if (!lctx.embedding.empty()) {
         auto & embedding_out = lctx.embedding;

-        const int64_t embed_pos = res ? n_embd * (n_tokens-1) : 0;
+        const int64_t embd_pos  = res ? n_embd * (n_tokens-1) : 0;
+        const int64_t embd_size = res ? n_embd : n_embd * n_tokens;

-        embedding_out.resize(n_embd);
+        embedding_out.resize(embd_size);
         ggml_backend_t embeddings_backend = ggml_backend_sched_get_node_backend(lctx.sched, embeddings);
-        ggml_backend_tensor_get_async(embeddings_backend, embeddings, embedding_out.data(), embed_pos*sizeof(float), n_embd*sizeof(float));
+        ggml_backend_tensor_get_async(embeddings_backend, embeddings, embedding_out.data(), embd_pos*sizeof(float), embd_size*sizeof(float));
         ggml_backend_synchronize(embeddings_backend);
     }

@@ -7787,7 +7950,13 @@ static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch) {
     switch (llama_vocab_get_type(vocab)) {
         case LLAMA_VOCAB_TYPE_SPM: {
             const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
-            return vocab.token_to_id.at(buf);
+            auto token = vocab.token_to_id.find(buf);
+            if (token != vocab.token_to_id.end()) {
+                return (*token).second;
+            }
+            // Try to fall back to just the byte as a string
+            const char buf2[2] = { (char)ch, 0 };
+            return vocab.token_to_id.at(buf2);
         }
         case LLAMA_VOCAB_TYPE_WPM:
         case LLAMA_VOCAB_TYPE_BPE: {
@@ -7836,7 +8005,7 @@ struct llm_bigram_spm {
 };

 struct llm_tokenizer_spm {
-    llm_tokenizer_spm(const llama_vocab & vocab): vocab(vocab) {}
+    llm_tokenizer_spm(const llama_vocab & vocab) : vocab(vocab) {}

     void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
         // split string into utf8 chars
@@ -7911,6 +8080,7 @@ private:

         if (p == rev_merge.end()) {
             // output any symbols that did not form tokens as bytes.
+            output.reserve(output.size() + symbol.n);
             for (int j = 0; j < (int)symbol.n; ++j) {
                 llama_vocab::id token_id = llama_byte_to_token(vocab, symbol.text[j]);
                 output.push_back(token_id);
@@ -8692,17 +8862,18 @@ struct fragment_buffer_variant {
         token(_token),
         raw_text(_dummy),
         offset(0),
-        length(0){}
+        length(0) {}

     fragment_buffer_variant(const std::string & _raw_text, int64_t _offset, int64_t _length)
     :
         type(FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT),
-        token((llama_vocab::id)-1),
+        token((llama_vocab::id) - 1),
         raw_text(_raw_text),
         offset(_offset),
         length(_length){
-            GGML_ASSERT( _offset >= 0 );
-            GGML_ASSERT( _length >= 1 );
-            GGML_ASSERT( offset + length <= raw_text.length() );
+            GGML_ASSERT(_offset >= 0);
+            GGML_ASSERT(_length >= 1);
+            GGML_ASSERT(offset + length <= raw_text.length());
         }

     const FRAGMENT_BUFFER_VARIANT_TYPE type;
@@ -8826,14 +8997,14 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
     }

     std::forward_list<fragment_buffer_variant> fragment_buffer;
-    fragment_buffer.emplace_front( raw_text, 0, raw_text.length() );
+    fragment_buffer.emplace_front(raw_text, 0, raw_text.length());

-    if (special) tokenizer_st_partition( vocab, fragment_buffer );
+    if (special) tokenizer_st_partition(vocab, fragment_buffer);

     switch (vocab.type) {
         case LLAMA_VOCAB_TYPE_SPM:
             {
-                for (const auto & fragment: fragment_buffer) {
+                for (const auto & fragment : fragment_buffer) {
                     if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
                         // without adding this leading whitespace, we do not get the same results as the original tokenizer

@@ -8861,7 +9032,7 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
             } break;
         case LLAMA_VOCAB_TYPE_BPE:
             {
-                for (const auto & fragment: fragment_buffer) {
+                for (const auto & fragment : fragment_buffer) {
                     if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
                         auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);

@@ -8887,7 +9058,7 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
             } break;
         case LLAMA_VOCAB_TYPE_WPM:
             {
-                for (const auto & fragment: fragment_buffer) {
+                for (const auto & fragment : fragment_buffer) {
                     if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
                         auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);

@@ -10767,7 +10938,11 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         quantize &= !params->only_copy;

         // do not quantize expert gating tensors
-        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
+        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_FFN_GATE_INP, "weight");
+
+        // do not quantize positional embeddings and token types (BERT)
+        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD, "weight");
+        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_TOKEN_TYPES, "weight");

         enum ggml_type new_type;
         void * new_data;
@@ -11041,7 +11216,7 @@ static int llama_apply_lora_from_file_internal(
                 {
                     LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'\n",
                             __func__, ftype);
-                    return false;
+                    return 1;
                 }
         }

@@ -11269,6 +11444,7 @@ struct llama_context_params llama_context_default_params() {
        /*.logits_all  =*/ false,
        /*.embedding   =*/ false,
        /*.offload_kqv =*/ true,
+       /*.do_pooling  =*/ true,
     };

     return result;
@@ -11319,7 +11495,7 @@ bool llama_mlock_supported(void) {
     return llama_supports_mlock();
 }

-void llama_backend_init(bool numa) {
+void llama_backend_init(void) {
     ggml_time_init();

     // needed to initialize f16 tables
@@ -11329,15 +11505,17 @@ void llama_backend_init(bool numa) {
         ggml_free(ctx);
     }

-    if (numa) {
-        ggml_numa_init();
-    }
-
 #ifdef GGML_USE_MPI
     ggml_mpi_backend_init();
 #endif
 }

+void llama_numa_init(enum ggml_numa_strategy numa) {
+    if (numa != GGML_NUMA_STRATEGY_DISABLED) {
|
||||||
|
ggml_numa_init(numa);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
void llama_backend_free(void) {
|
void llama_backend_free(void) {
|
||||||
#ifdef GGML_USE_MPI
|
#ifdef GGML_USE_MPI
|
||||||
ggml_mpi_backend_free();
|
ggml_mpi_backend_free();
|
||||||
|
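
With this change, backend and NUMA initialization become two separate calls. A minimal usage sketch (illustrative, not part of the diff), assuming the ggml_numa_strategy values introduced by this commit:

    // call once at program start
    llama_backend_init();
    // optionally pick a NUMA strategy; GGML_NUMA_STRATEGY_DISABLED makes this a no-op
    llama_numa_init(GGML_NUMA_STRATEGY_DISTRIBUTE);

    // ... load models, create contexts, run inference ...

    llama_backend_free();
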
@@ -11414,6 +11592,7 @@ struct llama_context * llama_new_context_with_model(
 cparams.yarn_beta_slow = params.yarn_beta_slow;
 cparams.mul_mat_q = params.mul_mat_q;
 cparams.offload_kqv = params.offload_kqv;
+cparams.do_pooling = params.do_pooling;
 
 cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx;
 cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base;
@@ -11561,7 +11740,7 @@ struct llama_context * llama_new_context_with_model(
 // resized during inference, reserve maximum
 ctx->logits.reserve(hparams.n_vocab*cparams.n_batch);
 
-if (params.embedding){
+if (params.embedding) {
 ctx->embedding.resize(hparams.n_embd);
 }
 
@@ -11579,14 +11758,16 @@ struct llama_context * llama_new_context_with_model(
 ctx->inp_pos = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, cparams.n_batch);
 ctx->inp_KQ_mask = ggml_new_tensor_2d(ctx->ctx_input, GGML_TYPE_F32, cparams.n_ctx, cparams.n_batch);
 ctx->inp_K_shift = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, cparams.n_ctx);
-ctx->inp_sum = ggml_new_tensor_2d(ctx->ctx_input, GGML_TYPE_F32, 1, cparams.n_batch);
+ctx->inp_mean = ggml_new_tensor_2d(ctx->ctx_input, GGML_TYPE_F32, cparams.n_batch, cparams.n_batch);
+ctx->inp_cls = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, cparams.n_batch);
 
 ggml_set_name(ctx->inp_tokens, "inp_tokens");
 ggml_set_name(ctx->inp_embd, "inp_embd");
 ggml_set_name(ctx->inp_pos, "inp_pos");
 ggml_set_name(ctx->inp_KQ_mask, "inp_KQ_mask");
 ggml_set_name(ctx->inp_K_shift, "inp_K_shift");
-ggml_set_name(ctx->inp_sum, "inp_sum");
+ggml_set_name(ctx->inp_mean, "inp_mean");
+ggml_set_name(ctx->inp_cls, "inp_cls");
 
 ctx->buf_input = ggml_backend_alloc_ctx_tensors_from_buft(ctx->ctx_input, llama_default_buffer_type_cpu(true));
 
@@ -12447,6 +12628,10 @@ float * llama_get_embeddings(struct llama_context * ctx) {
 return ctx->embedding.data();
 }
 
+float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) {
+return ctx->embedding.data() + i*ctx->model.hparams.n_embd;
+}
+
 const char * llama_token_get_text(const struct llama_model * model, llama_token token) {
 return model->vocab.id_to_token[token].text.c_str();
 }

llama.h (16 changes)

@@ -112,6 +112,12 @@ extern "C" {
 LLAMA_ROPE_SCALING_MAX_VALUE = LLAMA_ROPE_SCALING_YARN,
 };
 
+enum llama_pooling_type {
+LLAMA_POOLING_NONE = 0,
+LLAMA_POOLING_MEAN = 1,
+LLAMA_POOLING_CLS = 2,
+};
+
 enum llama_split_mode {
 LLAMA_SPLIT_NONE = 0, // single GPU
 LLAMA_SPLIT_LAYER = 1, // split layers and KV across GPUs
@@ -236,6 +242,7 @@ extern "C" {
 bool logits_all; // the llama_eval() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead)
 bool embedding; // embedding mode only
 bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
+bool do_pooling; // whether to pool (sum) embedding results by sequence id (ignored if no pooling layer)
 };
 
 // model quantization parameters
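
To request pooled sequence embeddings through the new field, a caller sets it on the context parameters before creating the context. A minimal sketch (illustrative; assumes a model already loaded with llama_load_model_from_file):

    struct llama_context_params cparams = llama_context_default_params();
    cparams.embedding  = true;  // embedding mode only
    cparams.do_pooling = true;  // pool results per sequence id (ignored if the model has no pooling layer)
    struct llama_context * ctx = llama_new_context_with_model(model, cparams);
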
@@ -305,7 +312,10 @@ extern "C" {
 // Initialize the llama + ggml backend
 // If numa is true, use NUMA optimizations
 // Call once at the start of the program
-LLAMA_API void llama_backend_init(bool numa);
+LLAMA_API void llama_backend_init(void);
 
+//optional:
+LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa);
+
 // Call once at the end of the program - currently only used for MPI
 LLAMA_API void llama_backend_free(void);
@@ -630,6 +640,10 @@ extern "C" {
 // shape: [n_embd] (1-dimensional)
 LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
 
+// Get the embeddings for the ith sequence
+// llama_get_embeddings(ctx) + i*n_embd
+LLAMA_API float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i);
+
 //
 // Vocab
 //
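
The new accessor is just an offset into the same buffer returned by llama_get_embeddings(). A minimal sketch of reading row i after a decode in embedding mode (illustrative; llama_n_embd is the pre-existing API for the embedding size and is not part of this diff):

    const int n_embd = llama_n_embd(model);
    const float * row = llama_get_embeddings_ith(ctx, i);  // same as llama_get_embeddings(ctx) + i*n_embd
    for (int k = 0; k < n_embd; ++k) {
        // consume row[k] ...
    }
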
scripts/compare-commits.sh (new executable file, 37 lines)

@@ -0,0 +1,37 @@
+#!/bin/bash
+
+if [ $# -lt 2 ]; then
+echo "usage: ./scripts/compare-commits.sh <commit1> <commit2> [additional llama-bench arguments]"
+exit 1
+fi
+
+set -e
+set -x
+
+bench_args="${@:3}"
+
+rm -f llama-bench.sqlite
+
+backend="cpu"
+
+if [[ "$OSTYPE" == "darwin"* ]]; then
+backend="metal"
+elif command -v nvcc &> /dev/null; then
+backend="cuda"
+fi
+
+make_opts=""
+
+if [[ "$backend" == "cuda" ]]; then
+make_opts="LLAMA_CUBLAS=1"
+fi
+
+git checkout $1
+make clean && make -j32 $make_opts llama-bench
+./llama-bench -o sql $bench_args | tee /dev/tty | sqlite3 llama-bench.sqlite
+
+git checkout $2
+make clean && make -j32 $make_opts llama-bench
+./llama-bench -o sql $bench_args | tee /dev/tty | sqlite3 llama-bench.sqlite
+
+./scripts/compare-llama-bench.py -b $1 -c $2
scripts/hf.sh (new executable file, 107 lines)

@@ -0,0 +1,107 @@
+#!/bin/bash
+#
+# Shortcut for downloading HF models
+#
+# Usage:
+# ./main -m $(./examples/hf.sh https://huggingface.co/TheBloke/Mixtral-8x7B-v0.1-GGUF/resolve/main/mixtral-8x7b-v0.1.Q4_K_M.gguf)
+# ./main -m $(./examples/hf.sh --url https://huggingface.co/TheBloke/Mixtral-8x7B-v0.1-GGUF/blob/main/mixtral-8x7b-v0.1.Q4_K_M.gguf)
+# ./main -m $(./examples/hf.sh --repo TheBloke/Mixtral-8x7B-v0.1-GGUF --file mixtral-8x7b-v0.1.Q4_K_M.gguf)
+#
+
+# all logs go to stderr
+function log {
+echo "$@" 1>&2
+}
+
+function usage {
+log "Usage: $0 [[--url] <url>] [--repo <repo>] [--file <file>] [-h|--help]"
+exit 1
+}
+
+# check for curl or wget
+function has_cmd {
+if ! [ -x "$(command -v $1)" ]; then
+return 1
+fi
+}
+
+if has_cmd wget; then
+cmd="wget -q --show-progress -c -O %s %s"
+elif has_cmd curl; then
+cmd="curl -C - -f -o %s -L %s"
+else
+log "[E] curl or wget not found"
+exit 1
+fi
+
+url=""
+repo=""
+file=""
+
+# parse args
+while [[ $# -gt 0 ]]; do
+case "$1" in
+--url)
+url="$2"
+shift 2
+;;
+--repo)
+repo="$2"
+shift 2
+;;
+--file)
+file="$2"
+shift 2
+;;
+-h|--help)
+usage
+;;
+*)
+url="$1"
+shift
+;;
+esac
+done
+
+if [ -n "$repo" ] && [ -n "$file" ]; then
+url="https://huggingface.co/$repo/resolve/main/$file"
+fi
+
+if [ -z "$url" ]; then
+log "[E] missing --url"
+usage
+fi
+
+# check if the URL is a HuggingFace model, and if so, try to download it
+is_url=false
+
+if [[ ${#url} -gt 22 ]]; then
+if [[ ${url:0:22} == "https://huggingface.co" ]]; then
+is_url=true
+fi
+fi
+
+if [ "$is_url" = false ]; then
+log "[E] invalid URL, must start with https://huggingface.co"
+exit 0
+fi
+
+# replace "blob/main" with "resolve/main"
+url=${url/blob\/main/resolve\/main}
+
+basename=$(basename $url)
+
+log "[+] attempting to download $basename"
+
+if [ -n "$cmd" ]; then
+cmd=$(printf "$cmd" "$basename" "$url")
+log "[+] $cmd"
+if $cmd; then
+echo $basename
+exit 0
+fi
+fi
+
+log "[-] failed to download"
+
+exit 1
@@ -12,7 +12,7 @@ int main(int argc, char ** argv) {
 auto * model_path = get_model_or_exit(argc, argv);
 
 std::thread([&model_path]() {
-llama_backend_init(false);
+llama_backend_init();
 auto * model = llama_load_model_from_file(model_path, llama_model_default_params());
 auto * ctx = llama_new_context_with_model(model, llama_context_default_params());
 llama_free(ctx);
@@ -2129,14 +2129,13 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
 test_cases.emplace_back(new test_pad());
 test_cases.emplace_back(new test_leaky_relu());
 
+// these tests are disabled to save execution time, but they can be handy for debugging
+#if 0
 #if !defined(__SANITIZE_THREAD__)
 // FIXME: these tests use too much memory with thread sanitizer
 test_cases.emplace_back(new test_moe(8, 2, 1, 4096, 8*1024));
 //test_cases.emplace_back(new test_moe(8, 2, 8, 4096, 14336));
 #endif
-
-// these tests are disabled to save execution time, but they can be handy for debugging
-#if 0
 test_cases.emplace_back(new test_llama(1));
 test_cases.emplace_back(new test_llama(2));
 test_cases.emplace_back(new test_falcon(1));
@@ -14,7 +14,7 @@ int main(int argc, char *argv[] ) {
 fprintf(stderr, "using '%s'\n", model_path);
 fclose(file);
 
-llama_backend_init(false);
+llama_backend_init();
 auto params = llama_model_params{};
 params.use_mmap = false;
 params.progress_callback = [](float progress, void * ctx){
unicode.h (64 changes)

@@ -264,26 +264,29 @@ static uint32_t codepoint_from_utf8(const std::string & utf8, size_t & offset) {
 offset += 1;
 return result;
 }
-else if (!(utf8[offset + 0] & 0x40)) {
+if (!(utf8[offset + 0] & 0x40)) {
 throw std::invalid_argument("invalid character");
 }
-else if (!(utf8[offset + 0] & 0x20)) {
-if (offset + 1 >= utf8.size() || ! ((utf8[offset + 1] & 0xc0) == 0x80))
+if (!(utf8[offset + 0] & 0x20)) {
+if (offset + 1 >= utf8.size() || ! ((utf8[offset + 1] & 0xc0) == 0x80)) {
 throw std::invalid_argument("invalid character");
+}
 auto result = ((utf8[offset + 0] & 0x1f) << 6) | (utf8[offset + 1] & 0x3f);
 offset += 2;
 return result;
 }
-else if (!(utf8[offset + 0] & 0x10)) {
-if (offset + 2 >= utf8.size() || ! ((utf8[offset + 1] & 0xc0) == 0x80) || ! ((utf8[offset + 2] & 0xc0) == 0x80))
+if (!(utf8[offset + 0] & 0x10)) {
+if (offset + 2 >= utf8.size() || ! ((utf8[offset + 1] & 0xc0) == 0x80) || ! ((utf8[offset + 2] & 0xc0) == 0x80)) {
 throw std::invalid_argument("invalid character");
+}
 auto result = ((utf8[offset + 0] & 0x0f) << 12) | ((utf8[offset + 1] & 0x3f) << 6) | (utf8[offset + 2] & 0x3f);
 offset += 3;
 return result;
 }
-else if (!(utf8[offset + 0] & 0x08)) {
-if (offset + 3 >= utf8.size() || ! ((utf8[offset + 1] & 0xc0) == 0x80) || ! ((utf8[offset + 2] & 0xc0) == 0x80) || !((utf8[offset + 3] & 0xc0) == 0x80))
+if (!(utf8[offset + 0] & 0x08)) {
+if (offset + 3 >= utf8.size() || ! ((utf8[offset + 1] & 0xc0) == 0x80) || ! ((utf8[offset + 2] & 0xc0) == 0x80) || !((utf8[offset + 3] & 0xc0) == 0x80)) {
 throw std::invalid_argument("invalid character");
+}
 auto result = ((utf8[offset + 0] & 0x07) << 18) | ((utf8[offset + 1] & 0x3f) << 12) | ((utf8[offset + 2] & 0x3f) << 6) | (utf8[offset + 3] & 0x3f);
 offset += 4;
 return result;
@@ -331,21 +334,22 @@ static uint32_t codepoint_from_utf16(const std::vector<uint16_t> & utf16, size_t
 offset += 1;
 return result;
 }
-else {
-if (offset + 1 >= utf16.size() || !((utf16[1] & 0xdc00) == 0xdc00))
+
+if (offset + 1 >= utf16.size() || !((utf16[1] & 0xdc00) == 0xdc00)) {
 throw std::invalid_argument("invalid character");
+}
+
 auto result = 0x10000 + (((utf16[0] & 0x03ff) << 10) | (utf16[1] & 0x03ff));
 offset += 2;
 return result;
-}
-throw std::invalid_argument("invalid string");
 }
 
 static std::vector<uint32_t> codepoints_from_utf16(const std::vector<uint16_t> & utf16) {
 std::vector<uint32_t> result;
 size_t offset = 0;
-while (offset < utf16.size())
+while (offset < utf16.size()) {
 result.push_back(codepoint_from_utf16(utf16, offset));
+}
 return result;
 }
 
@@ -361,44 +365,52 @@ static std::vector<uint32_t> codepoints_from_utf16(const std::vector<uint16_t> &
 static std::unordered_map<uint32_t, int> codepoint_type_map() {
 std::unordered_map<uint32_t, int> codepoint_types;
 for (auto p : digit_ranges) {
-for(auto i = p.first; i <= p.second; ++ i)
+for (auto i = p.first; i <= p.second; ++ i) {
 codepoint_types[i] = CODEPOINT_TYPE_DIGIT;
 }
-for(auto p : letter_ranges) {
-for(auto i = p.first; i <= p.second; ++ i)
+}
+for (auto p : letter_ranges) {
+for (auto i = p.first; i <= p.second; ++ i) {
 codepoint_types[i] = CODEPOINT_TYPE_LETTER;
 }
-for(auto p : whitespace_ranges) {
-for(auto i = p.first; i <= p.second; ++ i)
+}
+for (auto p : whitespace_ranges) {
+for (auto i = p.first; i <= p.second; ++ i) {
 codepoint_types[i] = CODEPOINT_TYPE_WHITESPACE;
 }
-for(auto p : accent_mark_ranges) {
-for(auto i = p.first; i <= p.second; ++ i)
+}
+for (auto p : accent_mark_ranges) {
+for (auto i = p.first; i <= p.second; ++ i) {
 codepoint_types[i] = CODEPOINT_TYPE_ACCENT_MARK;
 }
-for(auto p : punctuation_ranges) {
-for(auto i = p.first; i <= p.second; ++ i)
+}
+for (auto p : punctuation_ranges) {
+for (auto i = p.first; i <= p.second; ++ i) {
 codepoint_types[i] = CODEPOINT_TYPE_PUNCTUATION;
 }
+}
 for (auto p : symbol_ranges) {
-for (auto i = p.first; i <= p.second; ++i)
+for (auto i = p.first; i <= p.second; ++i) {
 codepoint_types[i] = CODEPOINT_TYPE_SYMBOL;
 }
-for(auto p : control_ranges) {
-for(auto i = p.first; i <= p.second; ++ i)
+}
+for (auto p : control_ranges) {
+for (auto i = p.first; i <= p.second; ++ i) {
 codepoint_types[i] = CODEPOINT_TYPE_CONTROL;
 }
+}
 return codepoint_types;
 }
 
 static int codepoint_type(uint32_t cp) {
 static std::unordered_map<uint32_t, int> codepoint_types = codepoint_type_map();
-return codepoint_types[cp];
+return codepoint_types.find(cp) == codepoint_types.end() ? CODEPOINT_TYPE_UNIDENTIFIED : codepoint_types.at(cp);
 }
 
 static int codepoint_type(const std::string & utf8) {
-if (utf8.length() == 0)
+if (utf8.length() == 0) {
 return CODEPOINT_TYPE_UNIDENTIFIED;
+}
 size_t offset = 0;
 return codepoint_type(codepoint_from_utf8(utf8, offset));
 }
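
The codepoint_type change matters because std::unordered_map::operator[] default-inserts a value for a missing key, mutating the function-local static map on every unknown codepoint, while find()/at() keeps the lookup read-only and falls back to CODEPOINT_TYPE_UNIDENTIFIED. A standalone sketch of the same pattern (illustrative; the constant value 0 is a placeholder, the real one comes from unicode.h):

    #include <cstdint>
    #include <unordered_map>

    static const int CODEPOINT_TYPE_UNIDENTIFIED = 0; // placeholder for the sketch

    static int lookup_type(const std::unordered_map<uint32_t, int> & types, uint32_t cp) {
        const auto it = types.find(cp);                 // no insertion on miss
        return it == types.end() ? CODEPOINT_TYPE_UNIDENTIFIED : it->second;
    }
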