Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-10 17:14:36 +00:00)

commit 1edf83761a

Merge branch 'upstream' into concedo_experimental

# Conflicts:
#	.github/workflows/bench.yml.disabled
#	Makefile
#	README.md
#	ggml/CMakeLists.txt
#	ggml/src/CMakeLists.txt
#	ggml/src/ggml-vulkan.cpp

49 changed files with 110670 additions and 109313 deletions
@@ -111,8 +111,34 @@ int32_t cpu_get_num_physical_cores() {
    if (result == 0) {
        return num_physical_cores;
    }
#elif defined(_WIN32)
    //TODO: Implement
#elif defined(_WIN32) && (_WIN32_WINNT >= 0x0601) && !defined(__MINGW64__) // windows 7 and later
    // TODO: windows + arm64 + mingw64
    unsigned int n_threads_win = std::thread::hardware_concurrency();
    unsigned int default_threads = n_threads_win > 0 ? (n_threads_win <= 4 ? n_threads_win : n_threads_win / 2) : 4;

    DWORD buffer_size = 0;
    if (!GetLogicalProcessorInformationEx(RelationProcessorCore, nullptr, &buffer_size)) {
        if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
            return default_threads;
        }
    }

    std::vector<char> buffer(buffer_size);
    if (!GetLogicalProcessorInformationEx(RelationProcessorCore, reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX>(buffer.data()), &buffer_size)) {
        return default_threads;
    }

    int32_t num_physical_cores = 0;
    PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX info = reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX>(buffer.data());
    while (buffer_size > 0) {
        if (info->Relationship == RelationProcessorCore) {
            num_physical_cores += info->Processor.GroupCount;
        }
        buffer_size -= info->Size;
        info = reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX>(reinterpret_cast<char*>(info) + info->Size);
    }

    return num_physical_cores > 0 ? num_physical_cores : default_threads;
#endif
    unsigned int n_threads = std::thread::hardware_concurrency();
    return n_threads > 0 ? (n_threads <= 4 ? n_threads : n_threads / 2) : 4;
@@ -1728,7 +1754,13 @@ std::string gpt_params_get_system_info(const gpt_params & params) {
    if (params.n_threads_batch != -1) {
        os << " (n_threads_batch = " << params.n_threads_batch << ")";
    }
#if defined(_WIN32) && (_WIN32_WINNT >= 0x0601) && !defined(__MINGW64__) // windows 7 and later
    // TODO: windows + arm64 + mingw64
    DWORD logicalProcessorCount = GetActiveProcessorCount(ALL_PROCESSOR_GROUPS);
    os << " / " << logicalProcessorCount << " | " << llama_print_system_info();
#else
    os << " / " << std::thread::hardware_concurrency() << " | " << llama_print_system_info();
#endif

    return os.str();
}
@@ -2703,12 +2735,6 @@ std::string llama_detokenize(llama_context * ctx, const std::vector<llama_token>
    return text;
}

bool llama_should_add_bos_token(const llama_model * model) {
    const int add_bos = llama_add_bos_token(model);

    return add_bos != -1 ? bool(add_bos) : (llama_vocab_type(model) == LLAMA_VOCAB_TYPE_SPM);
}

//
// Chat template utils
//

@@ -402,10 +402,6 @@ std::string llama_detokenize(
        const std::vector<llama_token> & tokens,
        bool special = true);

// Uses the value from the model metadata if possible, otherwise
// defaults to true when model type is SPM, otherwise false.
bool llama_should_add_bos_token(const llama_model * model);

//
// Chat template utils
//

@@ -590,6 +590,15 @@ class Model:
        if chkhsh == "855059429035d75a914d1eda9f10a876752e281a054a7a3d421ef0533e5b6249":
            # ref: https://huggingface.co/HuggingFaceTB/SmolLM-135M
            res = "smollm"
        if chkhsh == "3c30d3ad1d6b64202cd222813e7736c2db6e1bd6d67197090fc1211fbc612ae7":
            # ref: https://huggingface.co/bigscience/bloom
            res = "bloom"
        if chkhsh == "bc01ce58980e1db43859146dc51b1758b3b88729b217a74792e9f8d43e479d21":
            # ref: https://huggingface.co/TurkuNLP/gpt3-finnish-small
            res = "gpt3-finnish"
        if chkhsh == "4e2b24cc4770243d65a2c9ec19770a72f08cffc161adbb73fcbb6b7dd45a0aae":
            # ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct
            res = "exaone"

        if res is None:
            logger.warning("\n")
@@ -893,7 +902,7 @@ class GPTNeoXModel(Model):
        return tensors


@Model.register("BloomForCausalLM")
@Model.register("BloomForCausalLM", "BloomModel")
class BloomModel(Model):
    model_arch = gguf.MODEL_ARCH.BLOOM

@@ -3734,6 +3743,118 @@ class ChatGLMModel(Model):
        name = name.removeprefix("transformer.")
        return [(self.map_tensor_name(name), data_torch)]


@Model.register("NemotronForCausalLM")
class NemotronModel(Model):
    model_arch = gguf.MODEL_ARCH.NEMOTRON

    def set_vocab(self):
        self._set_vocab_sentencepiece()
        self.gguf_writer.add_pad_token_id(0)
        self.gguf_writer.add_unk_token_id(1)

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])

        f_norm_eps = self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon", "norm_eps"])
        self.gguf_writer.add_layer_norm_eps(f_norm_eps)

        # * Partial RoPE
        rot_pct = self.find_hparam(["partial_rotary_factor", "rope_pct", "rope_percent"])
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
        n_head = self.find_hparam(["num_attention_heads", "n_head"])
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)

        # * RopeScaling for Nemotron
        if "rope_scaling" not in self.hparams or self.hparams["rope_scaling"] is None:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
        else:
            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
            self.gguf_writer.add_rope_scaling_factor(self.hparams["factor"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # * Adding +1 to LayerNorm's weights here to implement layernorm1p w/o changing anything on the GGML engine side
        #   model.layers.{l}.input_layernorm.weight
        #   model.layers.{l}.post_attention_layernorm.weight
        #   model.norm.weight
        if name.endswith("norm.weight"):
            data_torch = data_torch + 1

        return [(self.map_tensor_name(name), data_torch)]


@Model.register("ExaoneForCausalLM")
class ExaoneModel(Model):
    model_arch = gguf.MODEL_ARCH.EXAONE

    def set_gguf_parameters(self):
        hparams = self.hparams

        assert(hparams["activation_function"] == "silu")

        max_position_embeddings = hparams["max_position_embeddings"]
        embed_dim = hparams["hidden_size"]
        num_heads = hparams["num_attention_heads"]
        num_kv_heads = hparams.get("num_key_value_heads", num_heads)
        layer_norm_eps = hparams["layer_norm_epsilon"]
        intermediate_size = hparams["intermediate_size"] if "intermediate_size" in hparams else 4 * embed_dim
        num_layers = hparams["num_layers"]
        # ignore for now as EXAONE-3.0-7.8B-Instruct attention_dropout is 0.0
        # attention_dropout_rate = hparams["attention_dropout"]
        # ignore for now as EXAONE-3.0-7.8B-Instruct embed_dropout is 0.0
        # embed_dropout_rate = hparams["embed_dropout"]
        self.gguf_writer.add_embedding_length(embed_dim)
        self.gguf_writer.add_head_count(num_heads)
        self.gguf_writer.add_head_count_kv(num_kv_heads)
        self.gguf_writer.add_context_length(max_position_embeddings)
        self.gguf_writer.add_layer_norm_rms_eps(layer_norm_eps)
        self.gguf_writer.add_feed_forward_length(intermediate_size)
        self.gguf_writer.add_block_count(num_layers)
        self.gguf_writer.add_file_type(self.ftype)

        if (rope_theta := self.hparams.get("rope_theta")) is not None:
            self.gguf_writer.add_rope_freq_base(rope_theta)
        rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"], optional=True)
        rotary_factor = rotary_factor if rotary_factor is not None else 1.0
        self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"])))
        if hparams.get("rope_scaling") is not None and "factor" in hparams["rope_scaling"]:
            if hparams["rope_scaling"].get("type") == "linear":
                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
                self.gguf_writer.add_rope_scaling_factor(hparams["rope_scaling"]["factor"])

    def prepare_tensors(self):
        if rope_scaling := self.find_hparam(["rope_scaling"], optional=True):
            if rope_scaling.get("rope_type", '').lower() == "llama3":
                base = self.hparams.get("rope_theta", 10000.0)
                dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
                freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

                factor = rope_scaling.get("factor", 8.0)
                low_freq_factor = rope_scaling.get("low_freq_factor", 1.0)
                high_freq_factor = rope_scaling.get("high_freq_factor", 4.0)
                old_context_len = self.hparams.get("original_max_position_embeddings", 8192)

                low_freq_wavelen = old_context_len / low_freq_factor
                high_freq_wavelen = old_context_len / high_freq_factor
                assert low_freq_wavelen != high_freq_wavelen

                rope_factors = []
                for freq in freqs:
                    wavelen = 2 * math.pi / freq
                    if wavelen < high_freq_wavelen:
                        rope_factors.append(1)
                    elif wavelen > low_freq_wavelen:
                        rope_factors.append(factor)
                    else:
                        smooth = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
                        rope_factors.append(1 / ((1 - smooth) / factor + smooth))

                self.gguf_writer.add_tensor(self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FREQS), np.array(rope_factors, dtype=np.float32))

        super().prepare_tensors()


###### CONVERSION LOGIC ######

@@ -94,6 +94,9 @@ models = [
    {"name": "codeshell", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/WisdomShell/CodeShell-7B", },
    {"name": "tekken", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistralai/Mistral-Nemo-Base-2407", },
    {"name": "smollm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/HuggingFaceTB/SmolLM-135M", },
    {'name': "bloom", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigscience/bloom", },
    {'name': "gpt3-finnish", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/TurkuNLP/gpt3-finnish-small", },
    {"name": "exaone", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct", },
]

@@ -116,7 +116,7 @@ class Tensor:
        assert quant is not None, 'Unknown tensor type'
        (blksize, tysize) = quant
        offset += 12
        self.dtype= dtype
        self.dtype= gguf.GGMLQuantizationType(dtype)
        self.dims = struct.unpack(f'<{n_dims}I', data[offset:offset + (4 * n_dims)])
        offset += 4 * n_dims
        self.name = bytes(data[offset:offset + name_len])

@@ -271,7 +271,7 @@ struct tokenized_prompt {
    size_t max_seq_len;

    tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
        const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
        const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
        tokens_pos = ::llama_tokenize(ctx, pos, add_bos, true);
        tokens_neg = ::llama_tokenize(ctx, neg, add_bos, true);
        max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());

@@ -127,7 +127,7 @@ static bool ggml_debug(struct ggml_tensor * t, bool ask, void * user_data) {
}

static bool run(llama_context * ctx, const gpt_params & params) {
    const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
    const bool add_bos = llama_add_bos_token(llama_get_model(ctx));

    std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, add_bos);

@@ -434,8 +434,8 @@ static void process_logits(
}

static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
    const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
    GGML_ASSERT(llama_add_eos_token(llama_get_model(ctx)) != 1);
    const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
    GGML_ASSERT(!llama_add_eos_token(llama_get_model(ctx)));
    const int n_ctx = llama_n_ctx(ctx);

    auto tim1 = std::chrono::high_resolution_clock::now();

@@ -204,8 +204,8 @@ int main(int argc, char ** argv) {
        LOG_TEE("\n");
        LOG_TEE("%s\n", gpt_params_get_system_info(params).c_str());
    }
    const bool add_bos = llama_should_add_bos_token(model);
    GGML_ASSERT(llama_add_eos_token(model) != 1);
    const bool add_bos = llama_add_bos_token(model);
    GGML_ASSERT(!llama_add_eos_token(model));
    LOG("add_bos: %d\n", add_bos);

    std::vector<llama_token> embd_inp;

@@ -16,8 +16,8 @@ Convert PyTorch model to gguf files (You can also download the converted [gguf](

```bash
python ./examples/minicpmv/minicpmv-surgery.py -m ../MiniCPM-Llama3-V-2_5
python ./examples/minicpmv/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-Llama3-V-2_5 --minicpmv-projector ../MiniCPM-Llama3-V-2_5/minicpmv.projector --output-dir ../MiniCPM-Llama3-V-2_5/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5
python ./convert-hf-to-gguf.py ../MiniCPM-Llama3-V-2_5/model
python ./examples/minicpmv/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-Llama3-V-2_5 --minicpmv-projector ../MiniCPM-Llama3-V-2_5/minicpmv.projector --output-dir ../MiniCPM-Llama3-V-2_5/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 2
python ./convert_hf_to_gguf.py ../MiniCPM-Llama3-V-2_5/model

# quantize int4 version
./llama-quantize ../MiniCPM-Llama3-V-2_5/model/model-8B-F16.gguf ../MiniCPM-Llama3-V-2_5/model/ggml-model-Q4_K_M.gguf Q4_K_M

examples/llava/README-minicpmv2.6.md (new file, 107 lines)

@@ -0,0 +1,107 @@
## MiniCPM-V 2.6

### Prepare models and code

Download the [MiniCPM-V-2_6](https://huggingface.co/openbmb/MiniCPM-V-2_6) PyTorch model from Hugging Face to the "MiniCPM-V-2_6" folder.

Clone llama.cpp:
```bash
git clone git@github.com:OpenBMB/llama.cpp.git
cd llama.cpp
git checkout minicpmv-main
```

### Usage of MiniCPM-V 2.6

Convert the PyTorch model to gguf files (you can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-V-2_6-gguf) files from us)

```bash
python ./examples/llava/minicpmv-surgery.py -m ../MiniCPM-V-2_6
python ./examples/llava/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-V-2_6 --minicpmv-projector ../MiniCPM-V-2_6/minicpmv.projector --output-dir ../MiniCPM-V-2_6/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 3
python ./convert_hf_to_gguf.py ../MiniCPM-V-2_6/model

# quantize int4 version
./llama-quantize ../MiniCPM-V-2_6/model/ggml-model-f16.gguf ../MiniCPM-V-2_6/model/ggml-model-Q4_K_M.gguf Q4_K_M
```

Build for Linux or Mac

```bash
make
make llama-minicpmv-cli
```

Inference on Linux or Mac
```
# run f16 version
./llama-minicpmv-cli -m ../MiniCPM-V-2_6/model/ggml-model-f16.gguf --mmproj ../MiniCPM-V-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?"

# run quantized int4 version
./llama-minicpmv-cli -m ../MiniCPM-V-2_6/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-V-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?"

# or run in interactive mode
./llama-minicpmv-cli -m ../MiniCPM-V-2_6/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-V-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -i
```

### Video
Install FFmpeg
```
brew install ffmpeg
brew install pkg-config
```
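The CLI consumes still images, so a video has to be split into frames first. A minimal sketch using FFmpeg (the file names `video.mp4` and `frame_%04d.jpg` are placeholder examples, not files shipped with this repo):

```bash
# sample one frame per second from the input video; raise fps= for denser sampling
ffmpeg -i video.mp4 -vf fps=1 frame_%04d.jpg

# feed an extracted frame to the CLI like any other image
./llama-minicpmv-cli -m ../MiniCPM-V-2_6/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-V-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image frame_0001.jpg -p "What is in the image?"
```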
### Android

#### Build on Android device using Termux
We found that building on the Android device itself brings better runtime performance, so we recommend building on the device.

[Termux](https://github.com/termux/termux-app#installation) is a terminal app on Android devices (no root required).

Install tools in Termux:
```
apt update && apt upgrade -y
apt install git make cmake
```

It's recommended to move your model inside the `~/` directory for best performance:
```
cd storage/downloads
mv model.gguf ~/
```

#### Building the Project using Android NDK
Obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake.

Execute the following commands on your computer to avoid downloading the NDK to your mobile. Alternatively, you can also do this in Termux:

```bash
mkdir build-android
cd build-android
export NDK=/your_ndk_path
cmake -DCMAKE_TOOLCHAIN_FILE=$NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-23 -DCMAKE_C_FLAGS=-march=armv8.4a+dotprod ..
make
```

Install [termux](https://github.com/termux/termux-app#installation) on your device and run `termux-setup-storage` to get access to your SD card (on Android 11+, run the command twice).

Finally, copy these built `llama` binaries and the model file to your device storage. Because the file permissions on the Android sdcard cannot be changed, you can copy the executable files to the `/data/data/com.termux/files/home/bin` path, and then execute the following commands in Termux to add executable permission:

(This assumes you have pushed the built executable files to the /sdcard/llama.cpp/bin path using `adb push`)
```
$cp -r /sdcard/llama.cpp/bin /data/data/com.termux/files/home/
$cd /data/data/com.termux/files/home/bin
$chmod +x ./*
```

Download models and push them to `/sdcard/llama.cpp/`, then move them to `/data/data/com.termux/files/home/model/`

```
$mv /sdcard/llama.cpp/ggml-model-Q4_K_M.gguf /data/data/com.termux/files/home/model/
$mv /sdcard/llama.cpp/mmproj-model-f16.gguf /data/data/com.termux/files/home/model/
```

Now, you can start chatting:
```
$cd /data/data/com.termux/files/home/bin
$./llama-minicpmv-cli -m ../model/ggml-model-Q4_K_M.gguf --mmproj ../model/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?"
```

@@ -81,6 +81,7 @@ static std::string format(const char * fmt, ...) {
#define KEY_HAS_VIS_ENC "clip.has_vision_encoder"
#define KEY_HAS_LLAVA_PROJ "clip.has_llava_projector"
#define KEY_HAS_MINICPMV_PROJ "clip.has_minicpmv_projector"
#define KEY_MINICPMV_VERSION "clip.minicpmv_version"
#define KEY_USE_GELU "clip.use_gelu"
#define KEY_N_EMBD "clip.%s.embedding_length"
#define KEY_N_FF "clip.%s.feed_forward_length"

@@ -526,6 +527,7 @@ struct clip_ctx {
    bool has_vision_encoder = false;
    bool has_llava_projector = false;
    bool has_minicpmv_projector = false;
    int minicpmv_version = 2;

    struct clip_vision_model vision_model;
    projector_type proj_type = PROJECTOR_TYPE_MLP;

@@ -641,7 +643,12 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
    if (ctx->has_minicpmv_projector) {
        int pos_w = image_size_width/patch_size;
        int pos_h = image_size_height/patch_size;
        pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 4096, pos_w * pos_h, 1);
        if (ctx->minicpmv_version == 2) {
            pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 4096, pos_w * pos_h, 1);
        }
        else if (ctx->minicpmv_version == 3) {
            pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 3584, pos_w * pos_h, 1);
        }
        ggml_set_name(pos_embed, "pos_embed");
        ggml_set_input(pos_embed);
    }

@@ -768,8 +775,8 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
            embeddings = ggml_gelu(ctx0, embeddings);
            embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
            embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);

        } else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
        }
        else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
            embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
            embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
            // ggml_tensor_printf(embeddings, "mm_0_w",0,true,false);

@@ -949,10 +956,20 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
        }

        { // attention
            const int hidden_size = 4096;
            int hidden_size = 4096;
            const int d_head = 128;
            const int n_head = hidden_size/d_head;
            const int num_query = 96;
            int n_head = hidden_size/d_head;
            int num_query = 96;
            if (ctx->minicpmv_version == 2) {
                hidden_size = 4096;
                n_head = hidden_size/d_head;
                num_query = 96;
            }
            else if (ctx->minicpmv_version == 3) {
                hidden_size = 3584;
                n_head = hidden_size/d_head;
                num_query = 64;
            }

            struct ggml_tensor * Q = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_q_w, q), model.mm_model_attn_q_b);
            Q = ggml_scale_inplace(ctx0, Q, 1.0f / sqrt((float)d_head));

@@ -1149,6 +1166,11 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
            new_clip->has_minicpmv_projector = gguf_get_val_bool(ctx, idx);
        }

        idx = gguf_find_key(ctx, KEY_MINICPMV_VERSION);
        if (idx != -1) {
            new_clip->minicpmv_version = gguf_get_val_i32(ctx, idx);
        }

        // GGML_ASSERT(new_clip->has_llava_projector); // see monatis/clip.cpp for image and/or text encoding for semantic search

        GGML_ASSERT(new_clip->has_vision_encoder);

@@ -1910,10 +1932,12 @@ int clip_uhd_num_image_embeds_col(struct clip_ctx * ctx_clip) {
// returns the normalized float tensor for llava-1.5, for spatial_unpad with anyres processing for llava-1.6 it returns the normalized image patch tensors as a vector
// res_imgs memory is being allocated here, previous allocations will be freed if found
bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, clip_image_f32_batch * res_imgs) {
    if (clip_is_minicpmv(ctx)) {
        std::vector<std::vector<clip_image_u8 *>> imgs = uhd_slice_image(img);

    if(clip_is_minicpmv(ctx)){
        int max_slice_nums = 9;
        std::vector<std::vector<clip_image_u8 *>> imgs = uhd_slice_image(img, max_slice_nums);
        res_imgs->size = 0;
        for (size_t i = 0; i < imgs.size(); ++i) {
        for (size_t i = 0; i < imgs.size(); ++i){
            res_imgs->size += imgs[i].size();
        }
        res_imgs->data = new clip_image_f32[res_imgs->size];

@@ -2146,7 +2170,12 @@ int clip_n_patches(const struct clip_ctx * ctx) {
    if (ctx->proj_type == PROJECTOR_TYPE_LDP || ctx->proj_type == PROJECTOR_TYPE_LDPV2) {
        n_patches /= 4;
    } else if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) {
        n_patches = 96;
        if (ctx->minicpmv_version == 2) {
            n_patches = 96;
        }
        else if (ctx->minicpmv_version == 3) {
            n_patches = 64;
        }
    }

    return n_patches;

@@ -2282,6 +2311,11 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
    const int patch_size = hparams.patch_size;
    const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
    const int num_positions = num_patches + (ctx->has_class_embedding ? 1 : 0);
    if(ctx->load_image_size==nullptr){
        ctx->load_image_size= clip_image_size_init();
    }
    const int pos_w = ctx->load_image_size->width/patch_size;
    const int pos_h = ctx->load_image_size->height/patch_size;

    {
        struct ggml_tensor * inp_raw = ggml_graph_get_tensor(gf, "inp_raw");

@@ -2316,8 +2350,18 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
            // -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit/blob/d66538faeba44480d0bfaa42145eef26f9423199/modeling_siglip.py#L316
            struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions");
            int* positions_data = (int*)malloc(ggml_nbytes(positions));
            for (int i = 0; i < num_positions; i++) {
                positions_data[i] = std::floor(70.0*i/num_positions);
            int bucket_coords_h[70];
            int bucket_coords_w[70];
            for (int i = 0; i < pos_h; i++){
                bucket_coords_h[i] = std::floor(70.0*i/pos_h);
            }
            for (int i = 0; i < pos_w; i++){
                bucket_coords_w[i] = std::floor(70.0*i/pos_w);
            }
            for (int i = 0, id = 0; i < pos_h; i++){
                for (int j = 0; j < pos_w; j++){
                    positions_data[id++] = bucket_coords_h[i]*70 + bucket_coords_w[j];
                }
            }
            ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions));
            free(positions_data);

@@ -2328,12 +2372,13 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
            // -> https://huggingface.co/Qwen/Qwen-VL/tree/main
            // -> https://huggingface.co/Qwen/Qwen-VL/blob/0547ed36a86561e2e42fecec8fd0c4f6953e33c4/visual.py#L23
            struct ggml_tensor * pos_embed = ggml_graph_get_tensor(gf, "pos_embed");
            if(ctx->load_image_size==nullptr){
                ctx->load_image_size= clip_image_size_init();
            }
            int pos_w = ctx->load_image_size->width/patch_size;
            int pos_h = ctx->load_image_size->height/patch_size;
            int embed_dim = 4096;
            if (ctx->minicpmv_version == 2) {
                embed_dim = 4096;
            }
            else if (ctx->minicpmv_version == 3) {
                embed_dim = 3584;
            }
            auto pos_embed_t = get_2d_sincos_pos_embed(embed_dim, std::make_pair(pos_w, pos_h));

            float * pos_embed_data = (float *)malloc(ggml_nbytes(pos_embed));

@@ -2346,7 +2391,8 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
            ggml_backend_tensor_set(pos_embed, pos_embed_data, 0, ggml_nbytes(pos_embed));
            free(pos_embed_data);
        }
    } else {
    }
    else{
        {
            if (ctx->has_class_embedding) {
                struct ggml_tensor * embeddings = ggml_graph_get_tensor(gf, "embeddings");

@@ -2548,13 +2594,21 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
        return ctx->vision_model.mm_3_b->ne[0];
    }
    if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) {
        return 4096;
        if (ctx->minicpmv_version == 2) {
            return 4096;
        }
        else if (ctx->minicpmv_version == 3) {
            return 3584;
        }
    }

    std::string proj_type = PROJECTOR_TYPE_NAMES[ctx->proj_type];
    throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str()));
}

bool clip_is_minicpmv(const struct clip_ctx * ctx) {
    return ctx->has_minicpmv_projector;
int clip_is_minicpmv(const struct clip_ctx * ctx) {
    if (ctx->has_minicpmv_projector) {
        return ctx->minicpmv_version;
    }
    return 0;
}

@@ -85,7 +85,7 @@ CLIP_API bool clip_image_batch_encode(struct clip_ctx * ctx, int n_threads, cons

CLIP_API bool clip_model_quantize(const char * fname_inp, const char * fname_out, int itype);

CLIP_API bool clip_is_minicpmv(const struct clip_ctx * ctx);
CLIP_API int clip_is_minicpmv(const struct clip_ctx * ctx);

#ifdef __cplusplus
}

@@ -256,7 +256,14 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli
            load_image_size->width = img_res_v.data[i].nx;
            load_image_size->height = img_res_v.data[i].ny;
            clip_add_load_image_size(ctx_clip, load_image_size);
            const bool encoded = clip_image_encode(ctx_clip, n_threads, only_v2_5_reshape_by_patch(&img_res_v.data[i], patch_size), image_embd_v[i]);
            bool encoded = false;
            int has_minicpmv_projector = clip_is_minicpmv(ctx_clip);
            if (has_minicpmv_projector == 2) {
                encoded = clip_image_encode(ctx_clip, n_threads, only_v2_5_reshape_by_patch(&img_res_v.data[i], patch_size), image_embd_v[i]);
            }
            else if (has_minicpmv_projector == 3) {
                encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[i], image_embd_v[i]);
            }
            if (!encoded) {
                LOG_TEE("Unable to encode image - spatial_unpad - subimage %d of %d\n", (int) i+1, (int) img_res_v.size);
                return false;

@@ -134,7 +134,13 @@ static void process_image(struct llava_context * ctx_llava, struct llava_image_e
    std::string system_prompt;
    int idx = 0;
    int num_image_embeds = embeds->n_image_pos / clip_n_patches(ctx_llava->ctx_clip);
    system_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n";
    int has_minicpmv_projector = clip_is_minicpmv(ctx_llava->ctx_clip);
    if (has_minicpmv_projector == 2) {
        system_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n";
    }
    else if (has_minicpmv_projector == 3) {
        system_prompt = "<|im_start|>user\n";
    }
    LOG_TEE("%s: image token past: %d\n", __func__, n_past);
    eval_string(ctx_llava->ctx_llama, (system_prompt+"<image>").c_str(), params->n_batch, &n_past, false);
    process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);

@@ -210,10 +216,24 @@ static struct llava_context * minicpmv_init(gpt_params * params, const std::stri

static struct llama_sampling_context * llama_init(struct llava_context * ctx_llava, gpt_params * params, std::string prompt, int &n_past, bool is_first = false){
    std::string user_prompt = prompt;
    if (!is_first) user_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n" + prompt;
    int has_minicpmv_projector = clip_is_minicpmv(ctx_llava->ctx_clip);
    if (!is_first) {
        if (has_minicpmv_projector == 2) {
            user_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n" + prompt;
        }
        else if (has_minicpmv_projector == 3) {
            user_prompt = "<|im_start|>user\n" + prompt;
        }
    }

    eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);
    eval_string(ctx_llava->ctx_llama, "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", params->n_batch, &n_past, false);
    if (has_minicpmv_projector == 2) {
        eval_string(ctx_llava->ctx_llama, "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", params->n_batch, &n_past, false);
    }
    else if (has_minicpmv_projector == 3) {
        eval_string(ctx_llava->ctx_llama, "<|im_end|><|im_start|>assistant\n", params->n_batch, &n_past, false);
    }

    // generate the response

    LOG_TEE("\n");

@@ -1,9 +1,416 @@
import argparse
# coding=utf-8
# Copyright 2024 Google AI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Siglip model. """
# Copied from HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit and add tgt_sizes


import os
import math
import warnings

import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn.init import _calculate_fan_in_and_fan_out

from transformers.activations import ACT2FN
from transformers.modeling_utils import PreTrainedModel
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import (
    logging,
)
from transformers.utils import logging

logger = logging.get_logger(__name__)


class SiglipVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`SiglipVisionModel`]. It is used to instantiate a
    Siglip vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip
    [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
    Example:
    ```python
    >>> from transformers import SiglipVisionConfig, SiglipVisionModel

    >>> # Initializing a SiglipVisionConfig with google/siglip-base-patch16-224 style configuration
    >>> configuration = SiglipVisionConfig()

    >>> # Initializing a SiglipVisionModel (with random weights) from the google/siglip-base-patch16-224 style configuration
    >>> model = SiglipVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "siglip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act


_CHECKPOINT_FOR_DOC = "google/siglip-base-patch16-224"

SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/siglip-base-patch16-224",
    # See all SigLIP models at https://huggingface.co/models?filter=siglip
]


# Copied from transformers.models.llama.modeling_llama._get_unpad_data
def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )


def _trunc_normal_(tensor, mean, std, a, b):
    # Cut & paste from PyTorch official master until it's in a few official releases - RW
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn(
            "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
            "The distribution of values may be incorrect.",
            stacklevel=2,
        )

    # Values are generated by using a truncated uniform distribution and
    # then using the inverse CDF for the normal distribution.
    # Get upper and lower cdf values
    l = norm_cdf((a - mean) / std)
    u = norm_cdf((b - mean) / std)

    # Uniformly fill tensor with values from [l, u], then translate to
    # [2l-1, 2u-1].
    tensor.uniform_(2 * l - 1, 2 * u - 1)

    # Use inverse cdf transform for normal distribution to get truncated
    # standard normal
    if tensor.dtype in [torch.float16, torch.bfloat16]:
        # The `erfinv_` op is not (yet?) defined in float16+cpu, bfloat16+gpu
        og_dtype = tensor.dtype
        tensor = tensor.to(torch.float32)
        tensor.erfinv_()
        tensor = tensor.to(og_dtype)
    else:
        tensor.erfinv_()

    # Transform to proper mean, std
    tensor.mul_(std * math.sqrt(2.0))
    tensor.add_(mean)

    # Clamp to ensure it's in the proper range
    if tensor.dtype == torch.float16:
        # The `clamp_` op is not (yet?) defined in float16+cpu
        tensor = tensor.to(torch.float32)
        tensor.clamp_(min=a, max=b)
        tensor = tensor.to(torch.float16)
    else:
        tensor.clamp_(min=a, max=b)


def trunc_normal_tf_(
    tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0
):
    """Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \\leq \text{mean} \\leq b`.
    NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
    bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
    and the result is subsequently scaled and shifted by the mean and std args.
    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value
    """
    with torch.no_grad():
        _trunc_normal_(tensor, 0, 1.0, a, b)
        tensor.mul_(std).add_(mean)


def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    denom = fan_in
    if mode == "fan_in":
        denom = fan_in
    elif mode == "fan_out":
        denom = fan_out
    elif mode == "fan_avg":
        denom = (fan_in + fan_out) / 2

    variance = scale / denom

    if distribution == "truncated_normal":
        # constant is stddev of standard normal truncated to (-2, 2)
        trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
    elif distribution == "normal":
        with torch.no_grad():
            tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        with torch.no_grad():
            tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")


def lecun_normal_(tensor):
    variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")


def default_flax_embed_init(tensor):
    variance_scaling_(tensor, mode="fan_in", distribution="normal")


class SiglipVisionEmbeddings(nn.Module):
    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            padding="valid",
        )

        self.num_patches_per_side = self.image_size // self.patch_size
        self.num_patches = self.num_patches_per_side**2
        self.num_positions = self.num_patches
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)


class SiglipAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    # Copied from transformers.models.clip.modeling_clip.CLIPAttention.__init__
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)


# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Siglip
class SiglipMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)


# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->Siglip
class SiglipEncoderLayer(nn.Module):
    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
        self.self_attn = (
            SiglipAttention(config)
        )
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = SiglipMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)


class SiglipPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SiglipVisionConfig
    base_model_prefix = "siglip"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""

        if isinstance(module, SiglipVisionEmbeddings):
            width = self.config.hidden_size
            nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
        elif isinstance(module, nn.Embedding):
            default_flax_embed_init(module.weight)
        elif isinstance(module, SiglipAttention):
            nn.init.normal_(module.q_proj.weight)
            nn.init.normal_(module.k_proj.weight)
            nn.init.normal_(module.v_proj.weight)
            nn.init.normal_(module.out_proj.weight)
            nn.init.zeros_(module.q_proj.bias)
            nn.init.zeros_(module.k_proj.bias)
            nn.init.zeros_(module.v_proj.bias)
            nn.init.zeros_(module.out_proj.bias)
        elif isinstance(module, SiglipMLP):
            nn.init.normal_(module.fc1.weight)
            nn.init.normal_(module.fc2.weight)
            nn.init.normal_(module.fc1.bias, std=1e-6)
            nn.init.normal_(module.fc2.bias, std=1e-6)
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            lecun_normal_(module.weight)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


SIGLIP_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)
    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.
    Parameters:
        config ([`SiglipVisionConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


SIGLIP_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->Siglip
class SiglipEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`SiglipEncoderLayer`].
    Args:
        config: SiglipConfig
    """

    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([SiglipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False


class SiglipVisionTransformer(SiglipPreTrainedModel):
    config_class = SiglipVisionConfig
    main_input_name = "pixel_values"
    _supports_flash_attn_2 = True

    def __init__(self, config: SiglipVisionConfig):
        super().__init__(config)
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = SiglipVisionEmbeddings(config)
        self.encoder = SiglipEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.embeddings.patch_embedding


import argparse
import json
import re

import torch
import numpy as np
from gguf import *
from transformers.models.idefics2.modeling_idefics2 import Idefics2VisionTransformer, Idefics2VisionConfig

@@ -94,6 +501,7 @@ default_image_mean = [0.48145466, 0.4578275, 0.40821073]
default_image_std = [0.26862954, 0.26130258, 0.27577711]
ap.add_argument('--image-mean', type=float, nargs='+', help='Mean of the images for normalization (overrides processor) ', default=None)
ap.add_argument('--image-std', type=float, nargs='+', help='Standard deviation of the images for normalization (overrides processor)', default=None)
ap.add_argument('--minicpmv_version', type=int, help='minicpmv_version: MiniCPM-V-2 use 1; MiniCPM-V-2.5 use 2; MiniCPM-V-2.6 use 3', default=2)

# with proper
args = ap.parse_args()

@@ -135,6 +543,15 @@ if args.use_f32:
# model = CLIPModel.from_pretrained(dir_model)
# processor = CLIPProcessor.from_pretrained(dir_model)

minicpmv_version = args.minicpmv_version
emb_dim = 4096
if minicpmv_version == 1:
    emb_dim = 2304
elif minicpmv_version == 2:
    emb_dim = 4096
elif minicpmv_version == 3:
    emb_dim = 3584

default_vision_config = {
    "hidden_size": 1152,
    "image_size": 980,

@@ -144,8 +561,12 @@ default_vision_config = {
    "num_hidden_layers": 27,
    "patch_size": 14,
}

vision_config = Idefics2VisionConfig(**default_vision_config)
model = Idefics2VisionTransformer(vision_config)
if minicpmv_version == 3:
    vision_config = SiglipVisionConfig(**default_vision_config)
    model = SiglipVisionTransformer(vision_config)

processor = None
# if model.attn_pool is not None:

@@ -158,6 +579,7 @@ fname_middle = None
has_text_encoder = True
has_vision_encoder = True
has_minicpmv_projector = False

if args.text_only:
    fname_middle = "text-"
    has_vision_encoder = False

@@ -165,6 +587,7 @@ elif args.minicpmv_projector is not None:
    fname_middle = "mmproj-"
    has_text_encoder = False
    has_minicpmv_projector = True
    minicpmv_version = 3
elif args.vision_only:
    fname_middle = "vision-"
    has_text_encoder = False

@@ -189,6 +612,7 @@ elif has_minicpmv_projector:
    fout.add_description("image encoder for MiniCPM-V")
    # add projector type
    fout.add_string("clip.projector_type", "resampler")
    fout.add_int32("clip.minicpmv_version", minicpmv_version)
else:
    fout.add_description("two-tower CLIP model")

@@ -274,11 +698,11 @@ def _replace_name_resampler(s, v):
    if re.match("resampler.pos_embed", s):
        return {
            s: v,
            re.sub("pos_embed", "pos_embed_k", s): torch.from_numpy(get_2d_sincos_pos_embed(4096, (70, 70))),
            re.sub("pos_embed", "pos_embed_k", s): torch.from_numpy(get_2d_sincos_pos_embed(emb_dim, (70, 70))),
        }
    if re.match("resampler.proj", s):
        return {
            re.sub("proj", "pos_embed_k", s): torch.from_numpy(get_2d_sincos_pos_embed(4096, (70, 70))),
            re.sub("proj", "pos_embed_k", s): torch.from_numpy(get_2d_sincos_pos_embed(emb_dim, (70, 70))),
            re.sub("proj", "proj.weight", s): v.transpose(-1, -2).contiguous(),
        }
    if re.match("resampler.attn.in_proj_.*", s):

@@ -4,7 +4,7 @@ import torch
from transformers import AutoModel, AutoTokenizer

ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", help="Path to MiniCPM-V-2.5 model")
ap.add_argument("-m", "--model", help="Path to MiniCPM-V model")
args = ap.parse_args()

# find the model part that includes the multimodal projector weights

@@ -29,7 +29,6 @@ if len(clip_tensors) > 0:
        f.write("{}\n")

config = model.llm.config
config._name_or_path = "openbmb/MiniCPM-Llama3-V-2.5"
config.auto_map = {
    "AutoConfig": "configuration_minicpm.MiniCPMConfig",
    "AutoModel": "modeling_minicpm.MiniCPMModel",

@@ -40,7 +39,6 @@ config.auto_map = {
model.llm.save_pretrained(f"{args.model}/model")
tok = AutoTokenizer.from_pretrained(args.model, trust_remote_code=True)
tok.save_pretrained(f"{args.model}/model")
# os.system(f"cp {args.model}/modeling_minicpm.py {args.model}/MiniCPM_l3/modeling_minicpm.py")

print("Done!")
print(f"Now you can convert {args.model} to a regular LLaMA GGUF file.")

@@ -268,9 +268,9 @@ int main(int argc, char ** argv) {
        }
    }

    const bool add_bos = llama_should_add_bos_token(model);
    const bool add_bos = llama_add_bos_token(model);
    if (!llama_model_has_encoder(model)) {
        GGML_ASSERT(llama_add_eos_token(model) != 1);
        GGML_ASSERT(!llama_add_eos_token(model));
    }
    LOG("add_bos: %d\n", add_bos);

@@ -341,8 +341,8 @@ static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params &
    // Output: `perplexity: 13.5106 [114/114]`
    // BOS tokens will be added for each chunk before eval

    const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
    GGML_ASSERT(llama_add_eos_token(llama_get_model(ctx)) != 1);
    const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
    GGML_ASSERT(!llama_add_eos_token(llama_get_model(ctx)));

    fprintf(stderr, "%s: tokenizing the input ..\n", __func__);

@@ -481,8 +481,8 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
    // Output: `perplexity: 13.5106 [114/114]`
    // BOS tokens will be added for each chunk before eval

    const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
    GGML_ASSERT(llama_add_eos_token(llama_get_model(ctx)) != 1);
    const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
    GGML_ASSERT(!llama_add_eos_token(llama_get_model(ctx)));

    std::ofstream logits_stream;
    if (!params.logits_file.empty()) {

@@ -1734,8 +1734,8 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
    const int n_batch = params.n_batch;
    const int num_batches = (n_ctx + n_batch - 1)/n_batch;
    const int nv = 2*((n_vocab + 1)/2) + 4;
    const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
    GGML_ASSERT(llama_add_eos_token(llama_get_model(ctx)) != 1);
    const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
    GGML_ASSERT(!llama_add_eos_token(llama_get_model(ctx)));

    std::vector<uint16_t> log_probs_uint16(size_t(n_ctx - 1 - n_ctx/2) * nv);
    std::vector<float> kld_values(size_t(n_ctx - 1 - n_ctx/2)*n_chunk);

@@ -34,7 +34,7 @@ Run the quantized model:

```bash
# start inference on a gguf model
./llama-cli -m ./models/mymodel/ggml-model-Q4_K_M.gguf -n 128
./llama-cli -m ./models/mymodel/ggml-model-Q4_K_M.gguf -cnv -p "You are a helpful assistant"
```

When running the larger models, make sure you have enough disk space to store all the intermediate files.
@@ -253,6 +253,8 @@ int main(int argc, char ** argv) {
        chunks[i].tokens.clear();
    }

    struct llama_batch query_batch = llama_batch_init(n_batch, 0, 1);

    // start loop, receive query and return top k similar chunks based on cosine similarity
    std::string query;
    while (true) {
@@ -260,7 +262,6 @@ int main(int argc, char ** argv) {
        std::getline(std::cin, query);
        std::vector<int32_t> query_tokens = llama_tokenize(ctx, query, true);

        struct llama_batch query_batch = llama_batch_init(n_batch, 0, 1);
        batch_add_seq(query_batch, query_tokens, 0);

        std::vector<float> query_emb(n_embd, 0);
@@ -293,6 +294,7 @@ int main(int argc, char ** argv) {
    }

    // clean up
    llama_batch_free(query_batch);
    llama_print_timings(ctx);
    llama_free(ctx);
    llama_free_model(model);
@@ -368,15 +368,16 @@ node index.js

## API Endpoints

### GET `/health`: Returns the current state of the server
### GET `/health`: Returns health check result

- 503 -> `{"status": "loading model"}` if the model is still being loaded.
- 500 -> `{"status": "error"}` if the model failed to load.
- 200 -> `{"status": "ok", "slots_idle": 1, "slots_processing": 2 }` if the model is successfully loaded and the server is ready for further requests mentioned below.
- 200 -> `{"status": "no slot available", "slots_idle": 0, "slots_processing": 32}` if no slots are currently available.
- 503 -> `{"status": "no slot available", "slots_idle": 0, "slots_processing": 32}` if the query parameter `fail_on_no_slot` is provided and no slots are currently available.
**Response format**

If the query parameter `include_slots` is passed, `slots` field will contain internal slots data except if `--slots-endpoint-disable` is set.
- HTTP status code 503
  - Body: `{"error": {"code": 503, "message": "Loading model", "type": "unavailable_error"}}`
  - Explanation: the model is still being loaded.
- HTTP status code 200
  - Body: `{"status": "ok" }`
  - Explanation: the model is successfully loaded and the server is ready.
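
A quick way to exercise the simplified endpoint is to poll it until the load completes. This is a hedged sketch using the cpp-httplib client that the server itself bundles; the host and port are assumptions, not defaults taken from the source:

```cpp
// Sketch: poll /health until the server reports 200 {"status": "ok"}.
// Assumes the server is reachable on localhost:8080.
#include "httplib.h"
#include <chrono>
#include <cstdio>
#include <thread>

int main() {
    httplib::Client cli("localhost", 8080);
    for (int i = 0; i < 30; ++i) {
        auto res = cli.Get("/health");
        if (res && res->status == 200) {
            std::printf("server ready: %s\n", res->body.c_str());
            return 0;
        }
        // while the model is loading, the middleware answers with 503
        std::this_thread::sleep_for(std::chrono::milliseconds(500));
    }
    std::fprintf(stderr, "server did not become healthy\n");
    return 1;
}
```
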
### POST `/completion`: Given a `prompt`, it returns the predicted completion.
@@ -639,10 +640,16 @@ Given a ChatML-formatted json description in `messages`, it returns the predicte
    }'
```

### GET `/slots`: Returns the current slots processing state. Can be disabled with `--slots-endpoint-disable`.
### GET `/slots`: Returns the current slots processing state

This endpoint can be disabled with `--no-slots`

If query param `?fail_on_no_slot=1` is set, this endpoint will respond with status code 503 if there are no available slots.

**Response format**

Example:

```json
[
  {
@@ -702,7 +709,13 @@ Given a ChatML-formatted json description in `messages`, it returns the predicte
  ]
```

### GET `/metrics`: Prometheus compatible metrics exporter endpoint if `--metrics` is enabled:
Possible values for `slot[i].state` are:
- `0`: SLOT_STATE_IDLE
- `1`: SLOT_STATE_PROCESSING
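
As a quick check of the new `fail_on_no_slot` behavior documented above, a hedged client sketch (cpp-httplib as bundled with the server; host and port are assumptions):

```cpp
// Sketch: query /slots and treat 503 as "no slot free" when
// ?fail_on_no_slot=1 is passed.
#include "httplib.h"
#include <cstdio>

int main() {
    httplib::Client cli("localhost", 8080);
    auto res = cli.Get("/slots?fail_on_no_slot=1");
    if (!res) { std::fprintf(stderr, "request failed\n"); return 1; }
    if (res->status == 503) {
        std::printf("no slot available\n");
    } else if (res->status == 200) {
        std::printf("slots: %s\n", res->body.c_str()); // JSON array, state 0 = idle
    }
    return 0;
}
```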

### GET `/metrics`: Prometheus compatible metrics exporter

This endpoint is only accessible if `--metrics` is set.

Available metrics:
- `llamacpp:prompt_tokens_total`: Number of prompt tokens processed.
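
To scrape the exporter by hand, a hedged sketch (same assumed host/port as above; the metric name shown in the comment is the one documented here):

```cpp
// Sketch: fetch /metrics and print the raw Prometheus exposition text.
// Requires the server to have been started with --metrics.
#include "httplib.h"
#include <cstdio>

int main() {
    httplib::Client cli("localhost", 8080);
    auto res = cli.Get("/metrics");
    if (res && res->status == 200) {
        std::printf("%s", res->body.c_str()); // e.g. llamacpp:prompt_tokens_total
        return 0;
    }
    std::fprintf(stderr, "metrics endpoint unavailable (is --metrics set?)\n");
    return 1;
}
```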

@@ -767,6 +780,10 @@ Available metrics:

### GET `/lora-adapters`: Get list of all LoRA adapters

This endpoint returns the loaded LoRA adapters. You can add adapters using `--lora` when starting the server, for example: `--lora my_adapter_1.gguf --lora my_adapter_2.gguf ...`

By default, all adapters will be loaded with scale set to 1. To initialize all adapters with scale 0, add `--lora-init-without-apply`.

If an adapter is disabled, the scale will be set to 0.
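
A hedged sketch for listing the adapters (cpp-httplib; host/port assumed; the response is the JSON array described under **Response format** below, each entry carrying at least a `scale` field per the handler code later in this diff):

```cpp
// Sketch: list the loaded LoRA adapters and their current scales.
#include "httplib.h"
#include <cstdio>

int main() {
    httplib::Client cli("localhost", 8080);
    auto res = cli.Get("/lora-adapters");
    if (res && res->status == 200) {
        std::printf("%s\n", res->body.c_str()); // JSON array with per-adapter scale
        return 0;
    }
    return 1;
}
```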

**Response format**
@@ -16,6 +16,8 @@
// Change JSON_ASSERT from assert() to GGML_ASSERT:
#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"
// mime type for sending response
#define MIMETYPE_JSON "application/json; charset=utf-8"

// auto generated files (update with ./deps.sh)
#include "colorthemes.css.hpp"
@@ -68,7 +70,6 @@ enum slot_command {
enum server_state {
    SERVER_STATE_LOADING_MODEL, // Server is starting up, model not fully loaded yet
    SERVER_STATE_READY,         // Server is ready and model is loaded
    SERVER_STATE_ERROR          // An error occurred, load_model failed
};

enum server_task_type {
@@ -694,8 +695,8 @@ struct server_context {

        n_ctx = llama_n_ctx(ctx);

        add_bos_token = llama_should_add_bos_token(model);
        has_eos_token = llama_add_eos_token(model) != 1;
        add_bos_token = llama_add_bos_token(model);
        has_eos_token = !llama_add_eos_token(model);

        return true;
    }
@@ -1323,7 +1324,7 @@ struct server_context {

        return json {
            {"n_ctx",       slot.n_ctx},
            {"n_predict",   slot.n_predict},
            {"n_predict",   slot.n_predict}, // Server configured n_predict
            {"model",       params.model_alias},
            {"seed",        slot.sparams.seed},
            {"temperature", slot.sparams.temp},
@@ -1345,7 +1346,7 @@ struct server_context {
            {"mirostat_eta",  slot.sparams.mirostat_eta},
            {"penalize_nl",   slot.sparams.penalize_nl},
            {"stop",          slot.params.antiprompt},
            {"n_predict",     slot.params.n_predict}, // TODO: fix duplicate key n_predict
            {"max_tokens",    slot.params.n_predict}, // User configured n_predict
            {"n_keep",        slot.params.n_keep},
            {"n_discard",     slot.params.n_discard},
            {"ignore_eos",    ignore_eos},
@@ -1853,6 +1854,8 @@ struct server_context {
                    llama_lora_adapters_apply(ctx, lora_adapters);
                    server_task_result result;
                    result.id = task.id;
                    result.stop = true;
                    result.error = false;
                    result.data = json{{ "success", true }};
                    queue_results.send(result);
                } break;
@@ -2037,7 +2040,7 @@ struct server_context {
                    slot.t_start_generation = 0;

                    if (slot.infill) {
                        const bool add_bos = llama_should_add_bos_token(model);
                        const bool add_bos = llama_add_bos_token(model);
                        bool suff_rm_leading_spc = true;
                        if (params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1) {
                            params.input_suffix.erase(0, 1);
@@ -2555,19 +2558,19 @@ int main(int argc, char ** argv) {
    svr->set_default_headers({{"Server", "llama.cpp"}});

    // CORS preflight
    svr->Options(R"(.*)", [](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
    svr->Options(R"(.*)", [](const httplib::Request &, httplib::Response & res) {
        // Access-Control-Allow-Origin is already set by middleware
        res.set_header("Access-Control-Allow-Credentials", "true");
        res.set_header("Access-Control-Allow-Methods",     "POST");
        res.set_header("Access-Control-Allow-Headers",     "*");
        return res.set_content("", "application/json; charset=utf-8");
        return res.set_content("", "text/html"); // blank response, no data
    });

    svr->set_logger(log_server_request);

    auto res_error = [](httplib::Response & res, json error_data) {
        json final_response {{"error", error_data}};
        res.set_content(final_response.dump(), "application/json; charset=utf-8");
        res.set_content(final_response.dump(), MIMETYPE_JSON);
        res.status = json_value(error_data, "code", 500);
    };

@@ -2597,11 +2600,6 @@ int main(int argc, char ** argv) {
    svr->set_read_timeout (params.timeout_read);
    svr->set_write_timeout(params.timeout_write);

    if (!svr->bind_to_port(params.hostname, params.port)) {
        fprintf(stderr, "\ncouldn't bind to server socket: hostname=%s port=%d\n\n", params.hostname.c_str(), params.port);
        return 1;
    }

    std::unordered_map<std::string, std::string> log_data;

    log_data["hostname"] = params.hostname;
@@ -2617,35 +2615,6 @@ int main(int argc, char ** argv) {
    // Necessary similarity of prompt for slot selection
    ctx_server.slot_prompt_similarity = params.slot_prompt_similarity;

    // load the model
    if (!ctx_server.load_model(params)) {
        state.store(SERVER_STATE_ERROR);
        return 1;
    } else {
        ctx_server.init();
        state.store(SERVER_STATE_READY);
    }

    LOG_INFO("model loaded", {});

    const auto model_meta = ctx_server.model_meta();

    // if a custom chat template is not supplied, we will use the one that comes with the model (if any)
    if (params.chat_template.empty()) {
        if (!ctx_server.validate_model_chat_template()) {
            LOG_WARNING("The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses", {});
            params.chat_template = "chatml";
        }
    }

    // print sample chat example to make it clear which template is used
    {
        LOG_INFO("chat template", {
            {"chat_example", llama_chat_format_example(ctx_server.model, params.chat_template)},
            {"built_in", params.chat_template.empty()},
        });
    }

    //
    // Middlewares
    //
@@ -2689,8 +2658,6 @@ int main(int argc, char ** argv) {
        }

        // API key is invalid or not provided
        // TODO: make another middleware for CORS related logic
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        res_error(res, format_error_response("Invalid API Key", ERROR_TYPE_AUTHENTICATION));

        LOG_WARNING("Unauthorized: Invalid API Key", {});
@@ -2698,8 +2665,21 @@ int main(int argc, char ** argv) {
        return false;
    };

    auto middleware_server_state = [&res_error, &state](const httplib::Request &, httplib::Response & res) {
        server_state current_state = state.load();
        if (current_state == SERVER_STATE_LOADING_MODEL) {
            res_error(res, format_error_response("Loading model", ERROR_TYPE_UNAVAILABLE));
            return false;
        }
        return true;
    };

    // register server middlewares
    svr->set_pre_routing_handler([&middleware_validate_api_key](const httplib::Request & req, httplib::Response & res) {
    svr->set_pre_routing_handler([&middleware_validate_api_key, &middleware_server_state](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        if (!middleware_server_state(req, res)) {
            return httplib::Server::HandlerResponse::Handled;
        }
        if (!middleware_validate_api_key(req, res)) {
            return httplib::Server::HandlerResponse::Handled;
        }
@@ -2710,62 +2690,15 @@ int main(int argc, char ** argv) {
    // Route handlers (or controllers)
    //

    const auto handle_health = [&](const httplib::Request & req, httplib::Response & res) {
        server_state current_state = state.load();
        switch (current_state) {
            case SERVER_STATE_READY:
                {
                    // request slots data using task queue
                    server_task task;
                    task.id   = ctx_server.queue_tasks.get_new_id();
                    task.type = SERVER_TASK_TYPE_METRICS;
                    task.id_target = -1;

                    ctx_server.queue_results.add_waiting_task_id(task.id);
                    ctx_server.queue_tasks.post(task);

                    // get the result
                    server_task_result result = ctx_server.queue_results.recv(task.id);
                    ctx_server.queue_results.remove_waiting_task_id(task.id);

                    const int n_idle_slots       = result.data.at("idle");
                    const int n_processing_slots = result.data.at("processing");

                    json health = {
                        {"status",           "ok"},
                        {"slots_idle",       n_idle_slots},
                        {"slots_processing", n_processing_slots}
                    };

                    res.status = 200; // HTTP OK
                    if (params.endpoint_slots && req.has_param("include_slots")) {
                        health["slots"] = result.data.at("slots");
                    }

                    if (n_idle_slots == 0) {
                        health["status"] = "no slot available";
                        if (req.has_param("fail_on_no_slot")) {
                            res.status = 503; // HTTP Service Unavailable
                        }
                    }

                    res.set_content(health.dump(), "application/json");
                    break;
                }
            case SERVER_STATE_LOADING_MODEL:
                {
                    res_error(res, format_error_response("Loading model", ERROR_TYPE_UNAVAILABLE));
                } break;
            case SERVER_STATE_ERROR:
                {
                    res_error(res, format_error_response("Model failed to load", ERROR_TYPE_SERVER));
                } break;
        }
    const auto handle_health = [&](const httplib::Request &, httplib::Response & res) {
        // error and loading states are handled by middleware
        json health = {{"status", "ok"}};
        res.set_content(health.dump(), "application/json");
    };

    const auto handle_slots = [&](const httplib::Request &, httplib::Response & res) {
    const auto handle_slots = [&](const httplib::Request & req, httplib::Response & res) {
        if (!params.endpoint_slots) {
            res_error(res, format_error_response("This server does not support slots endpoint.", ERROR_TYPE_NOT_SUPPORTED));
            res_error(res, format_error_response("This server does not support slots endpoint. Start it without `--no-slots`", ERROR_TYPE_NOT_SUPPORTED));
            return;
        }
@@ -2783,13 +2716,22 @@ int main(int argc, char ** argv) {
        server_task_result result = ctx_server.queue_results.recv(task.id);
        ctx_server.queue_results.remove_waiting_task_id(task.id);

        res.set_content(result.data.at("slots").dump(), "application/json");
        // optionally return "fail_on_no_slot" error
        const int n_idle_slots = result.data.at("idle");
        if (req.has_param("fail_on_no_slot")) {
            if (n_idle_slots == 0) {
                res_error(res, format_error_response("no slot available", ERROR_TYPE_UNAVAILABLE));
                return;
            }
        }

        res.set_content(result.data.at("slots").dump(), MIMETYPE_JSON);
        res.status = 200; // HTTP OK
    };

    const auto handle_metrics = [&](const httplib::Request &, httplib::Response & res) {
        if (!params.endpoint_metrics) {
            res_error(res, format_error_response("This server does not support metrics endpoint.", ERROR_TYPE_NOT_SUPPORTED));
            res_error(res, format_error_response("This server does not support metrics endpoint. Start it with `--metrics`", ERROR_TYPE_NOT_SUPPORTED));
            return;
        }
@@ -2914,7 +2856,7 @@ int main(int argc, char ** argv) {
        if (result.error) {
            res_error(res, result.data);
        } else {
            res.set_content(result.data.dump(), "application/json");
            res.set_content(result.data.dump(), MIMETYPE_JSON);
        }
    };

@@ -2944,7 +2886,7 @@ int main(int argc, char ** argv) {
        if (result.error) {
            res_error(res, result.data);
        } else {
            res.set_content(result.data.dump(), "application/json");
            res.set_content(result.data.dump(), MIMETYPE_JSON);
        }
    };

@@ -2964,13 +2906,11 @@ int main(int argc, char ** argv) {
        if (result.error) {
            res_error(res, result.data);
        } else {
            res.set_content(result.data.dump(), "application/json");
            res.set_content(result.data.dump(), MIMETYPE_JSON);
        }
    };

    const auto handle_slots_action = [&res_error, &handle_slots_save, &handle_slots_restore, &handle_slots_erase](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));

        std::string id_slot_str = req.path_params.at("id_slot");
        int id_slot;

@@ -2994,7 +2934,7 @@ int main(int argc, char ** argv) {
        }
    };

    const auto handle_props = [&ctx_server](const httplib::Request & req, httplib::Response & res) {
    const auto handle_props = [&ctx_server](const httplib::Request &, httplib::Response & res) {
        std::string template_key = "tokenizer.chat_template", curr_tmpl;
        int32_t tlen = llama_model_meta_val_str(ctx_server.model, template_key.c_str(), nullptr, 0);
        if (tlen > 0) {
@@ -3003,7 +2943,6 @@ int main(int argc, char ** argv) {
                curr_tmpl = std::string(curr_tmpl_buf.data(), tlen);
            }
        }
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        json data = {
            { "system_prompt",               ctx_server.system_prompt.c_str() },
            { "default_generation_settings", ctx_server.default_generation_settings_for_props },
@@ -3011,7 +2950,7 @@ int main(int argc, char ** argv) {
            { "chat_template",               curr_tmpl.c_str() }
        };

        res.set_content(data.dump(), "application/json; charset=utf-8");
        res.set_content(data.dump(), MIMETYPE_JSON);
    };

    const auto handle_completions = [&ctx_server, &res_error](const httplib::Request & req, httplib::Response & res) {
@@ -3020,8 +2959,6 @@ int main(int argc, char ** argv) {
            return;
        }

        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));

        json data = json::parse(req.body);

        const int id_task = ctx_server.queue_tasks.get_new_id();
@@ -3032,7 +2969,7 @@ int main(int argc, char ** argv) {
        if (!json_value(data, "stream", false)) {
            server_task_result result = ctx_server.queue_results.recv(id_task);
            if (!result.error && result.stop) {
                res.set_content(result.data.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
                res.set_content(result.data.dump(-1, ' ', false, json::error_handler_t::replace), MIMETYPE_JSON);
            } else {
                res_error(res, result.data);
            }
@@ -3095,9 +3032,7 @@ int main(int argc, char ** argv) {
        }
    };

    const auto handle_models = [&params, &model_meta](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));

    const auto handle_models = [&params, &ctx_server](const httplib::Request &, httplib::Response & res) {
        json models = {
            {"object", "list"},
            {"data", {
@@ -3106,12 +3041,12 @@ int main(int argc, char ** argv) {
                {"object",   "model"},
                {"created",  std::time(0)},
                {"owned_by", "llamacpp"},
                {"meta",     model_meta}
                {"meta",     ctx_server.model_meta()}
            },
        }}
        };

        res.set_content(models.dump(), "application/json; charset=utf-8");
        res.set_content(models.dump(), MIMETYPE_JSON);
    };

    const auto handle_chat_completions = [&ctx_server, &params, &res_error](const httplib::Request & req, httplib::Response & res) {
@@ -3119,8 +3054,6 @@ int main(int argc, char ** argv) {
            res_error(res, format_error_response("This server does not support chat completions. Start it without `--embeddings`", ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        json data = oaicompat_completion_params_parse(ctx_server.model, json::parse(req.body), params.chat_template);

        const int id_task = ctx_server.queue_tasks.get_new_id();
@@ -3135,7 +3068,7 @@ int main(int argc, char ** argv) {
            if (!result.error && result.stop) {
                json result_oai = format_final_response_oaicompat(data, result.data, completion_id);

                res.set_content(result_oai.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
                res.set_content(result_oai.dump(-1, ' ', false, json::error_handler_t::replace), MIMETYPE_JSON);
            } else {
                res_error(res, result.data);
            }
@@ -3197,8 +3130,6 @@ int main(int argc, char ** argv) {
            return;
        }

        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));

        json data = json::parse(req.body);

        const int id_task = ctx_server.queue_tasks.get_new_id();
@@ -3209,7 +3140,7 @@ int main(int argc, char ** argv) {
        if (!json_value(data, "stream", false)) {
            server_task_result result = ctx_server.queue_results.recv(id_task);
            if (!result.error && result.stop) {
                res.set_content(result.data.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
                res.set_content(result.data.dump(-1, ' ', false, json::error_handler_t::replace), MIMETYPE_JSON);
            } else {
                res_error(res, result.data);
            }
@@ -3257,7 +3188,6 @@ int main(int argc, char ** argv) {
    };

    const auto handle_tokenize = [&ctx_server](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        const json body = json::parse(req.body);

        std::vector<llama_token> tokens;
@@ -3266,11 +3196,10 @@ int main(int argc, char ** argv) {
            tokens = ctx_server.tokenize(body.at("content"), add_special);
        }
        const json data = format_tokenizer_response(tokens);
        return res.set_content(data.dump(), "application/json; charset=utf-8");
        return res.set_content(data.dump(), MIMETYPE_JSON);
    };

    const auto handle_detokenize = [&ctx_server](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        const json body = json::parse(req.body);

        std::string content;
@@ -3280,12 +3209,10 @@ int main(int argc, char ** argv) {
        }

        const json data = format_detokenized_response(content);
        return res.set_content(data.dump(), "application/json; charset=utf-8");
        return res.set_content(data.dump(), MIMETYPE_JSON);
    };

    const auto handle_embeddings = [&ctx_server, &res_error](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));

        const json body = json::parse(req.body);
        bool is_openai = false;

@@ -3331,11 +3258,10 @@ int main(int argc, char ** argv) {
        json root = is_openai
            ? format_embeddings_response_oaicompat(body, responses)
            : responses[0];
        return res.set_content(root.dump(), "application/json; charset=utf-8");
        return res.set_content(root.dump(), MIMETYPE_JSON);
    };

    const auto handle_lora_adapters_list = [&](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
    const auto handle_lora_adapters_list = [&](const httplib::Request &, httplib::Response & res) {
        json result = json::array();
        for (size_t i = 0; i < ctx_server.lora_adapters.size(); ++i) {
            auto & la = ctx_server.lora_adapters[i];
@@ -3345,13 +3271,11 @@ int main(int argc, char ** argv) {
                {"scale", la.scale},
            });
        }
        res.set_content(result.dump(), "application/json");
        res.set_content(result.dump(), MIMETYPE_JSON);
        res.status = 200; // HTTP OK
    };

    const auto handle_lora_adapters_apply = [&](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));

        const std::vector<json> body = json::parse(req.body);
        int max_idx = ctx_server.lora_adapters.size();

@@ -3379,7 +3303,7 @@ int main(int argc, char ** argv) {
        server_task_result result = ctx_server.queue_results.recv(id_task);
        ctx_server.queue_results.remove_waiting_task_id(id_task);

        res.set_content(result.data.dump(), "application/json");
        res.set_content(result.data.dump(), MIMETYPE_JSON);
        res.status = 200; // HTTP OK
    };

@@ -3455,35 +3379,75 @@ int main(int argc, char ** argv) {
    log_data["n_threads_http"] = std::to_string(params.n_threads_http);
    svr->new_task_queue = [&params] { return new httplib::ThreadPool(params.n_threads_http); };

    LOG_INFO("HTTP server listening", log_data);
    // clean up function, to be called before exit
    auto clean_up = [&svr]() {
        svr->stop();
        llama_backend_free();
    };

    // run the HTTP server in a thread - see comment below
    std::thread t([&]() {
        if (!svr->listen_after_bind()) {
            state.store(SERVER_STATE_ERROR);
            return 1;
    // bind HTTP listen port, run the HTTP server in a thread
    if (!svr->bind_to_port(params.hostname, params.port)) {
        LOG_ERROR("couldn't bind HTTP server socket", {
            {"hostname", params.hostname},
            {"port", params.port},
        });
        clean_up();
        LOG_ERROR("exiting due to HTTP server error", {});
        return 1;
    }
    std::thread t([&]() { svr->listen_after_bind(); });
    svr->wait_until_ready();

    LOG_INFO("HTTP server is listening", log_data);

    // load the model
    LOG_INFO("loading model", log_data);
    if (!ctx_server.load_model(params)) {
        clean_up();
        t.join();
        LOG_ERROR("exiting due to model loading error", {});
        return 1;
    } else {
        ctx_server.init();
        state.store(SERVER_STATE_READY);

        LOG_INFO("model loaded", {});

        // if a custom chat template is not supplied, we will use the one that comes with the model (if any)
        if (params.chat_template.empty()) {
            if (!ctx_server.validate_model_chat_template()) {
                LOG_WARNING("The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses", {});
                params.chat_template = "chatml";
            }
        }

        return 0;
    });
        // print sample chat example to make it clear which template is used
        {
            LOG_INFO("chat template", {
                {"chat_example", llama_chat_format_example(ctx_server.model, params.chat_template)},
                {"built_in", params.chat_template.empty()},
            });
        }

    ctx_server.queue_tasks.on_new_task(std::bind(
        &server_context::process_single_task, &ctx_server, std::placeholders::_1));
    ctx_server.queue_tasks.on_finish_multitask(std::bind(
        &server_context::on_finish_multitask, &ctx_server, std::placeholders::_1));
    ctx_server.queue_tasks.on_update_slots(std::bind(
        &server_context::update_slots, &ctx_server));
    ctx_server.queue_results.on_multitask_update(std::bind(
        &server_queue::update_multitask,
        &ctx_server.queue_tasks,
        std::placeholders::_1,
        std::placeholders::_2,
        std::placeholders::_3
    ));
        ctx_server.queue_tasks.on_new_task(std::bind(
            &server_context::process_single_task, &ctx_server, std::placeholders::_1));
        ctx_server.queue_tasks.on_finish_multitask(std::bind(
            &server_context::on_finish_multitask, &ctx_server, std::placeholders::_1));
        ctx_server.queue_tasks.on_update_slots(std::bind(
            &server_context::update_slots, &ctx_server));
        ctx_server.queue_results.on_multitask_update(std::bind(
            &server_queue::update_multitask,
            &ctx_server.queue_tasks,
            std::placeholders::_1,
            std::placeholders::_2,
            std::placeholders::_3
        ));

    shutdown_handler = [&](int) {
        ctx_server.queue_tasks.terminate();
    };
        shutdown_handler = [&](int) {
            ctx_server.queue_tasks.terminate();
        };
        ctx_server.queue_tasks.start_loop();
    }

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
    struct sigaction sigint_action;
@@ -3499,12 +3463,8 @@ int main(int argc, char ** argv) {
    SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif

    ctx_server.queue_tasks.start_loop();

    svr->stop();
    clean_up();
    t.join();

    llama_backend_free();

    return 0;
}
@@ -205,27 +205,20 @@ def step_start_server(context):
async def step_wait_for_the_server_to_be_started(context, expecting_status: Literal['healthy', 'ready', 'idle', 'busy'] | str):
    match expecting_status:
        case 'healthy':
            await wait_for_health_status(context, context.base_url, 200, 'ok',
                                         timeout=30)
            await wait_for_slots_status(context, context.base_url, 200,
                                        timeout=30)

        case 'ready' | 'idle':
            await wait_for_health_status(context, context.base_url, 200, 'ok',
                                         timeout=30,
                                         params={'fail_on_no_slot': 0, 'include_slots': 0},
                                         slots_idle=context.n_slots,
                                         slots_processing=0,
                                         expected_slots=[{'id': slot_id, 'state': 0}
                                                         for slot_id in
                                                         range(context.n_slots if context.n_slots else 1)])
            await wait_for_slots_status(context, context.base_url, 200,
                                        timeout=30,
                                        params={'fail_on_no_slot': 1},
                                        slots_idle=context.n_slots,
                                        slots_processing=0)
        case 'busy':
            await wait_for_health_status(context, context.base_url, 503,
                                         'no slot available',
                                         params={'fail_on_no_slot': 0, 'include_slots': 0},
                                         slots_idle=0,
                                         slots_processing=context.n_slots,
                                         expected_slots=[{'id': slot_id, 'state': 1}
                                                         for slot_id in
                                                         range(context.n_slots if context.n_slots else 1)])
            await wait_for_slots_status(context, context.base_url, 503,
                                        params={'fail_on_no_slot': 1},
                                        slots_idle=0,
                                        slots_processing=context.n_slots)
        case _:
            assert False, "unknown status"
@@ -1187,17 +1180,15 @@ async def gather_tasks_results(context):
    return n_completions


async def wait_for_health_status(context,
                                 base_url,
                                 expected_http_status_code,
                                 expected_health_status,
                                 timeout=3,
                                 params=None,
                                 slots_idle=None,
                                 slots_processing=None,
                                 expected_slots=None):
async def wait_for_slots_status(context,
                                base_url,
                                expected_http_status_code,
                                timeout=3,
                                params=None,
                                slots_idle=None,
                                slots_processing=None):
    if context.debug:
        print(f"Starting checking for health for expected_health_status={expected_health_status}")
        print(f"Starting checking for health for expected_http_status_code={expected_http_status_code}")
    interval = 0.5
    counter = 0
    if 'GITHUB_ACTIONS' in os.environ:
@@ -1205,26 +1196,19 @@ async def wait_for_health_status(context,

    async with aiohttp.ClientSession() as session:
        while True:
            async with await session.get(f'{base_url}/health', params=params) as health_response:
                status_code = health_response.status
                health = await health_response.json()
            async with await session.get(f'{base_url}/slots', params=params) as slots_response:
                status_code = slots_response.status
                slots = await slots_response.json()
                if context.debug:
                    print(f"HEALTH - response for expected health status='{expected_health_status}' on "
                          f"'{base_url}/health'?{params} is {health}\n")
                if (status_code == expected_http_status_code
                        and health['status'] == expected_health_status
                        and (slots_idle is None or health['slots_idle'] == slots_idle)
                        and (slots_processing is None or health['slots_processing'] == slots_processing)):
                    if expected_slots is not None:
                        assert_slots_status(health['slots'], expected_slots)
                    return
                if (status_code == expected_http_status_code
                        and health['status'] == expected_health_status
                        and (slots_idle is None or health['slots_idle'] == slots_idle)
                        and (slots_processing is None or health['slots_processing'] == slots_processing)):
                    if expected_slots is not None:
                        assert_slots_status(health['slots'], expected_slots)
                    print(f"slots responses {slots}\n")
                if status_code == 503 and status_code == expected_http_status_code:
                    return
                if status_code == 200 and status_code == expected_http_status_code:
                    n_slots_idle = sum(1 if slot["state"] == 0 else 0 for slot in slots)
                    n_slots_processing = sum(1 if slot["state"] != 0 else 0 for slot in slots)
                    if ((slots_idle is None or slots_idle == n_slots_idle)
                            and (slots_processing is None or slots_processing == n_slots_processing)):
                        return
            await asyncio.sleep(interval)

            counter += interval
@@ -1238,7 +1222,7 @@ async def wait_for_health_status(context,
    if n_completions > 0:
        return

    assert False, f'{expected_health_status} timeout exceeded {counter}s>={timeout}'
    assert False, f'slots check timeout exceeded {counter}s>={timeout}'


def assert_embeddings(embeddings):
@@ -362,7 +362,7 @@ int main(int raw_argc, char ** raw_argv) {
        prompt = stdin_buffer.str();
    }

    const bool model_wants_add_bos = llama_should_add_bos_token(model);
    const bool model_wants_add_bos = llama_add_bos_token(model);
    const bool add_bos = model_wants_add_bos && !no_bos;
    const bool parse_special = !no_parse_special;

@@ -1018,10 +1018,6 @@ static bool ggml_is_view_op(enum ggml_op op) {
#define GGML_SCHED_MAX_BACKENDS 16
#endif

#ifndef GGML_SCHED_MAX_SPLITS
#define GGML_SCHED_MAX_SPLITS 2048
#endif

#ifndef GGML_SCHED_MAX_SPLIT_INPUTS
#define GGML_SCHED_MAX_SPLIT_INPUTS GGML_MAX_SRC
#endif

@@ -1125,7 +1121,8 @@ static int ggml_backend_sched_backend_from_buffer(ggml_backend_sched_t sched, co
}

#if 0
static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_SCHED_MAX_SPLITS*GGML_SCHED_MAX_SPLIT_INPUTS][128]; // debug only
#define GGML_SCHED_MAX_SPLITS_DEBUG 4096
static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_SCHED_MAX_SPLITS_DEBUG*GGML_SCHED_MAX_SPLIT_INPUTS][128]; // debug only
#define SET_CAUSE(node, ...) sprintf(causes[hash_id(node)], __VA_ARGS__)
#define GET_CAUSE(node) causes[hash_id(node)]
#else

@@ -1549,7 +1546,6 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
            sched->splits = realloc(sched->splits, sched->splits_capacity * sizeof(struct ggml_backend_sched_split));
            GGML_ASSERT(sched->splits != NULL);
        }
        GGML_ASSERT(i_split < GGML_SCHED_MAX_SPLITS);
        split = &sched->splits[i_split];
        split->backend_id = node_backend_id;
        split->i_start = i;

@@ -1865,13 +1861,14 @@ ggml_backend_sched_t ggml_backend_sched_new(
    sched->hv_tensor_backend_ids = malloc(sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0]));
    sched->hv_tensor_copies     = malloc(sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *));

    const size_t nodes_size = graph_size + GGML_SCHED_MAX_SPLITS*GGML_SCHED_MAX_SPLIT_INPUTS*2;
    const size_t ggml_sched_max_splits = graph_size; // at most there is one split for each node in the graph
    const size_t nodes_size = graph_size + ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2;
    sched->node_backend_ids = calloc(nodes_size, sizeof(sched->node_backend_ids[0]));
    sched->leaf_backend_ids = calloc(nodes_size, sizeof(sched->leaf_backend_ids[0]));
    sched->prev_node_backend_ids = calloc(nodes_size, sizeof(sched->prev_node_backend_ids[0]));
    sched->prev_leaf_backend_ids = calloc(nodes_size, sizeof(sched->prev_leaf_backend_ids[0]));

    sched->context_buffer_size = GGML_SCHED_MAX_SPLITS*GGML_SCHED_MAX_SPLIT_INPUTS*2*sizeof(struct ggml_tensor) + ggml_graph_overhead_custom(graph_size, false);
    sched->context_buffer_size = ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sizeof(struct ggml_tensor) + ggml_graph_overhead_custom(graph_size, false);
    sched->context_buffer = malloc(sched->context_buffer_size);

    const int initial_splits_capacity = 16;
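To make the new sizing concrete, a hedged back-of-the-envelope check (the constants below are illustrative assumptions, not values read from this commit):

```cpp
// Sketch of the new allocation sizing: the split count is now bounded by the
// graph size ("at most one split per node") instead of a fixed GGML_SCHED_MAX_SPLITS.
#include <cstdio>

#define GGML_MAX_SRC 10                         // assumed for illustration
#define GGML_SCHED_MAX_SPLIT_INPUTS GGML_MAX_SRC

int main() {
    size_t graph_size = 2048;                   // example graph size, assumed
    size_t max_splits = graph_size;             // one split per node at most
    size_t nodes_size = graph_size + max_splits * GGML_SCHED_MAX_SPLIT_INPUTS * 2;
    std::printf("nodes_size = %zu entries\n", nodes_size); // 2048 + 2048*10*2 = 43008
    return 0;
}
```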
File diff suppressed because it is too large
|
@ -3,9 +3,6 @@
|
|||
extern unsigned char matmul_f16_fp32_data[10276];
|
||||
const uint64_t matmul_f16_fp32_len = 10276;
|
||||
|
||||
extern unsigned char matmul_q8_0_f32_fp32_data[10488];
|
||||
const uint64_t matmul_q8_0_f32_fp32_len = 10488;
|
||||
|
||||
extern unsigned char matmul_f32_f16_fp32_data[10260];
|
||||
const uint64_t matmul_f32_f16_fp32_len = 10260;
|
||||
|
||||
|
@ -15,20 +12,20 @@ const uint64_t matmul_f16_aligned_fp32_len = 10288;
|
|||
extern unsigned char matmul_f32_f16_aligned_fp32_data[10240];
|
||||
const uint64_t matmul_f32_f16_aligned_fp32_len = 10240;
|
||||
|
||||
extern unsigned char matmul_f32_f32_aligned_fp32_data[10124];
|
||||
const uint64_t matmul_f32_f32_aligned_fp32_len = 10124;
|
||||
extern unsigned char matmul_f32_f32_fp32_data[10208];
|
||||
const uint64_t matmul_f32_f32_fp32_len = 10208;
|
||||
|
||||
extern unsigned char matmul_f16_f32_fp32_data[10260];
|
||||
const uint64_t matmul_f16_f32_fp32_len = 10260;
|
||||
|
||||
extern unsigned char matmul_f32_f32_fp32_data[10208];
|
||||
const uint64_t matmul_f32_f32_fp32_len = 10208;
|
||||
|
||||
extern unsigned char matmul_q4_0_f32_fp32_data[10444];
|
||||
const uint64_t matmul_q4_0_f32_fp32_len = 10444;
|
||||
|
||||
extern unsigned char matmul_f16_f32_aligned_fp32_data[10240];
|
||||
const uint64_t matmul_f16_f32_aligned_fp32_len = 10240;
|
||||
extern unsigned char matmul_f32_f32_aligned_fp32_data[10124];
|
||||
const uint64_t matmul_f32_f32_aligned_fp32_len = 10124;
|
||||
|
||||
extern unsigned char matmul_q4_0_f32_aligned_fp32_data[10408];
|
||||
const uint64_t matmul_q4_0_f32_aligned_fp32_len = 10408;
|
||||
|
||||
extern unsigned char matmul_q4_1_f32_fp32_data[10512];
|
||||
const uint64_t matmul_q4_1_f32_fp32_len = 10512;
|
||||
|
@ -36,203 +33,203 @@ const uint64_t matmul_q4_1_f32_fp32_len = 10512;
|
|||
extern unsigned char matmul_q5_0_f32_fp32_data[10956];
|
||||
const uint64_t matmul_q5_0_f32_fp32_len = 10956;
|
||||
|
||||
extern unsigned char matmul_q4_1_f32_aligned_fp32_data[10476];
|
||||
const uint64_t matmul_q4_1_f32_aligned_fp32_len = 10476;
|
||||
extern unsigned char matmul_q5_1_f32_fp32_data[10836];
|
||||
const uint64_t matmul_q5_1_f32_fp32_len = 10836;
|
||||
|
||||
extern unsigned char matmul_q5_0_f32_aligned_fp32_data[10920];
|
||||
const uint64_t matmul_q5_0_f32_aligned_fp32_len = 10920;
|
||||
|
||||
extern unsigned char matmul_q8_0_f32_aligned_fp32_data[10452];
|
||||
const uint64_t matmul_q8_0_f32_aligned_fp32_len = 10452;
|
||||
extern unsigned char matmul_q5_1_f32_aligned_fp32_data[10800];
|
||||
const uint64_t matmul_q5_1_f32_aligned_fp32_len = 10800;
|
||||
|
||||
extern unsigned char matmul_q2_k_f32_fp32_data[11120];
|
||||
const uint64_t matmul_q2_k_f32_fp32_len = 11120;
|
||||
|
||||
extern unsigned char matmul_q5_1_f32_aligned_fp32_data[10800];
|
||||
const uint64_t matmul_q5_1_f32_aligned_fp32_len = 10800;
|
||||
extern unsigned char matmul_q8_0_f32_aligned_fp32_data[10452];
|
||||
const uint64_t matmul_q8_0_f32_aligned_fp32_len = 10452;
|
||||
|
||||
extern unsigned char matmul_q2_k_f32_aligned_fp32_data[11068];
|
||||
const uint64_t matmul_q2_k_f32_aligned_fp32_len = 11068;
|
||||
|
||||
extern unsigned char matmul_q4_0_f32_aligned_fp32_data[10408];
|
||||
const uint64_t matmul_q4_0_f32_aligned_fp32_len = 10408;
|
||||
extern unsigned char matmul_q8_0_f32_fp32_data[10488];
|
||||
const uint64_t matmul_q8_0_f32_fp32_len = 10488;
|
||||
|
||||
extern unsigned char matmul_q3_k_f32_fp32_data[13024];
|
||||
const uint64_t matmul_q3_k_f32_fp32_len = 13024;
|
||||
|
||||
extern unsigned char matmul_q4_k_f32_aligned_fp32_data[11900];
|
||||
const uint64_t matmul_q4_k_f32_aligned_fp32_len = 11900;
|
||||
|
||||
extern unsigned char matmul_q3_k_f32_aligned_fp32_data[12972];
|
||||
const uint64_t matmul_q3_k_f32_aligned_fp32_len = 12972;
|
||||
|
||||
extern unsigned char matmul_q4_k_f32_fp32_data[11952];
|
||||
const uint64_t matmul_q4_k_f32_fp32_len = 11952;
|
||||
extern unsigned char matmul_q2_k_f32_aligned_fp32_data[11068];
|
||||
const uint64_t matmul_q2_k_f32_aligned_fp32_len = 11068;
|
||||
|
||||
extern unsigned char matmul_f16_f32_aligned_fp32_data[10240];
|
||||
const uint64_t matmul_f16_f32_aligned_fp32_len = 10240;
|
||||
|
||||
extern unsigned char matmul_q5_k_f32_fp32_data[12476];
|
||||
const uint64_t matmul_q5_k_f32_fp32_len = 12476;
|
||||
|
||||
extern unsigned char matmul_q5_k_f32_aligned_fp32_data[12424];
|
||||
const uint64_t matmul_q5_k_f32_aligned_fp32_len = 12424;
|
||||
|
||||
extern unsigned char matmul_q6_k_f32_fp32_data[11644];
|
||||
const uint64_t matmul_q6_k_f32_fp32_len = 11644;
|
||||
|
||||
extern unsigned char matmul_q4_k_f32_fp32_data[11952];
|
||||
const uint64_t matmul_q4_k_f32_fp32_len = 11952;
|
||||
|
||||
extern unsigned char matmul_q4_k_f32_aligned_fp32_data[11900];
|
||||
const uint64_t matmul_q4_k_f32_aligned_fp32_len = 11900;
|
||||
|
||||
extern unsigned char matmul_q5_k_f32_aligned_fp32_data[12424];
|
||||
const uint64_t matmul_q5_k_f32_aligned_fp32_len = 12424;
|
||||
|
||||
extern unsigned char matmul_q6_k_f32_aligned_fp32_data[11592];
|
||||
const uint64_t matmul_q6_k_f32_aligned_fp32_len = 11592;
|
||||
|
||||
extern unsigned char matmul_id_q3_k_f32_fp32_data[13496];
|
||||
const uint64_t matmul_id_q3_k_f32_fp32_len = 13496;
|
||||
|
||||
extern unsigned char matmul_iq4_nl_f32_fp32_data[10928];
|
||||
const uint64_t matmul_iq4_nl_f32_fp32_len = 10928;
|
||||
|
||||
extern unsigned char matmul_id_f32_f16_fp32_data[10732];
|
||||
const uint64_t matmul_id_f32_f16_fp32_len = 10732;
|
||||
|
||||
extern unsigned char matmul_id_f16_fp32_data[10748];
|
||||
const uint64_t matmul_id_f16_fp32_len = 10748;
|
||||
extern unsigned char matmul_iq4_nl_f32_aligned_fp32_data[10892];
|
||||
const uint64_t matmul_iq4_nl_f32_aligned_fp32_len = 10892;
|
||||
|
||||
extern unsigned char matmul_id_f32_f16_aligned_fp32_data[10928];
|
||||
const uint64_t matmul_id_f32_f16_aligned_fp32_len = 10928;
|
||||
|
||||
extern unsigned char matmul_id_q4_k_f32_fp32_data[12424];
|
||||
const uint64_t matmul_id_q4_k_f32_fp32_len = 12424;
|
||||
|
||||
extern unsigned char matmul_id_f32_f32_aligned_fp32_data[10812];
|
||||
const uint64_t matmul_id_f32_f32_aligned_fp32_len = 10812;
|
||||
|
||||
extern unsigned char matmul_id_f16_f32_fp32_data[10732];
|
||||
const uint64_t matmul_id_f16_f32_fp32_len = 10732;
|
||||
|
||||
extern unsigned char matmul_id_f16_aligned_fp32_data[10976];
|
||||
const uint64_t matmul_id_f16_aligned_fp32_len = 10976;
|
||||
extern unsigned char matmul_id_f16_fp32_data[10748];
|
||||
const uint64_t matmul_id_f16_fp32_len = 10748;
|
||||
|
||||
extern unsigned char matmul_id_f16_f32_aligned_fp32_data[10928];
|
||||
const uint64_t matmul_id_f16_f32_aligned_fp32_len = 10928;
|
||||
|
||||
extern unsigned char matmul_id_q4_0_f32_fp32_data[10916];
|
||||
const uint64_t matmul_id_q4_0_f32_fp32_len = 10916;
|
||||
extern unsigned char matmul_id_f16_f32_fp32_data[10732];
|
||||
const uint64_t matmul_id_f16_f32_fp32_len = 10732;
|
||||
|
||||
extern unsigned char matmul_id_f32_f32_fp32_data[10680];
|
||||
const uint64_t matmul_id_f32_f32_fp32_len = 10680;
|
||||
|
||||
extern unsigned char matmul_id_f32_f32_aligned_fp32_data[10812];
|
||||
const uint64_t matmul_id_f32_f32_aligned_fp32_len = 10812;
|
||||
|
||||
extern unsigned char matmul_id_q4_0_f32_aligned_fp32_data[11096];
|
||||
const uint64_t matmul_id_q4_0_f32_aligned_fp32_len = 11096;
|
||||
|
||||
extern unsigned char matmul_id_q6_k_f32_fp32_data[12116];
|
||||
const uint64_t matmul_id_q6_k_f32_fp32_len = 12116;
|
||||
|
||||
extern unsigned char matmul_id_q4_1_f32_aligned_fp32_data[11164];
|
||||
const uint64_t matmul_id_q4_1_f32_aligned_fp32_len = 11164;
|
||||
|
||||
extern unsigned char matmul_f32_f16_aligned_data[11360];
|
||||
const uint64_t matmul_f32_f16_aligned_len = 11360;
|
||||
|
||||
extern unsigned char matmul_id_iq4_nl_f32_aligned_fp32_data[11580];
|
||||
const uint64_t matmul_id_iq4_nl_f32_aligned_fp32_len = 11580;
|
||||
extern unsigned char matmul_id_q5_0_f32_fp32_data[11412];
|
||||
const uint64_t matmul_id_q5_0_f32_fp32_len = 11412;
|
||||
|
||||
extern unsigned char matmul_id_q5_1_f32_fp32_data[11308];
|
||||
const uint64_t matmul_id_q5_1_f32_fp32_len = 11308;
|
||||
|
||||
extern unsigned char matmul_id_q4_0_f32_fp32_data[10916];
|
||||
const uint64_t matmul_id_q4_0_f32_fp32_len = 10916;
|
||||
|
||||
extern unsigned char matmul_id_q4_1_f32_fp32_data[10984];
|
||||
const uint64_t matmul_id_q4_1_f32_fp32_len = 10984;
|
||||
|
||||
extern unsigned char matmul_id_q5_0_f32_aligned_fp32_data[11592];
|
||||
const uint64_t matmul_id_q5_0_f32_aligned_fp32_len = 11592;
|
||||
|
||||
extern unsigned char matmul_id_q5_1_f32_aligned_fp32_data[11488];
|
||||
const uint64_t matmul_id_q5_1_f32_aligned_fp32_len = 11488;
|
||||
|
||||
extern unsigned char matmul_id_q4_1_f32_aligned_fp32_data[11164];
|
||||
const uint64_t matmul_id_q4_1_f32_aligned_fp32_len = 11164;
|
||||
|
||||
extern unsigned char matmul_id_q8_0_f32_fp32_data[10960];
|
||||
const uint64_t matmul_id_q8_0_f32_fp32_len = 10960;
|
||||
|
||||
extern unsigned char matmul_id_q3_k_f32_fp32_data[13496];
|
||||
const uint64_t matmul_id_q3_k_f32_fp32_len = 13496;
|
||||
|
||||
extern unsigned char matmul_id_q8_0_f32_aligned_fp32_data[11140];
|
||||
const uint64_t matmul_id_q8_0_f32_aligned_fp32_len = 11140;
|
||||
|
||||
extern unsigned char matmul_id_q2_k_f32_fp32_data[11592];
|
||||
const uint64_t matmul_id_q2_k_f32_fp32_len = 11592;
|
||||
|
||||
extern unsigned char matmul_id_q2_k_f32_aligned_fp32_data[11756];
|
||||
const uint64_t matmul_id_q2_k_f32_aligned_fp32_len = 11756;
|
||||
|
||||
extern unsigned char matmul_iq4_nl_f32_aligned_fp32_data[10892];
|
||||
const uint64_t matmul_iq4_nl_f32_aligned_fp32_len = 10892;
|
||||
|
||||
extern unsigned char matmul_id_f32_f32_fp32_data[10680];
|
||||
const uint64_t matmul_id_f32_f32_fp32_len = 10680;
|
||||
|
||||
extern unsigned char matmul_id_q3_k_f32_aligned_fp32_data[13660];
|
||||
const uint64_t matmul_id_q3_k_f32_aligned_fp32_len = 13660;
|
||||
|
||||
extern unsigned char matmul_q5_1_f32_data[10956];
|
||||
const uint64_t matmul_q5_1_f32_len = 10956;
|
||||
|
||||
extern unsigned char matmul_q5_0_f32_aligned_data[11668];
|
||||
const uint64_t matmul_q5_0_f32_aligned_len = 11668;
|
||||
|
||||
extern unsigned char matmul_id_q5_k_f32_aligned_fp32_data[13112];
|
||||
const uint64_t matmul_id_q5_k_f32_aligned_fp32_len = 13112;
|
||||
|
||||
extern unsigned char matmul_id_q4_1_f32_fp32_data[10984];
|
||||
const uint64_t matmul_id_q4_1_f32_fp32_len = 10984;
|
||||
|
||||
extern unsigned char matmul_id_q6_k_f32_aligned_fp32_data[12280];
|
||||
const uint64_t matmul_id_q6_k_f32_aligned_fp32_len = 12280;
|
||||
|
||||
extern unsigned char matmul_id_iq4_nl_f32_fp32_data[11400];
|
||||
const uint64_t matmul_id_iq4_nl_f32_fp32_len = 11400;
|
||||
|
||||
extern unsigned char matmul_id_q5_0_f32_fp32_data[11412];
|
||||
const uint64_t matmul_id_q5_0_f32_fp32_len = 11412;
|
||||
|
||||
extern unsigned char matmul_f32_f16_data[10332];
|
||||
const uint64_t matmul_f32_f16_len = 10332;
|
||||
|
||||
extern unsigned char matmul_id_q5_0_f32_aligned_fp32_data[11592];
|
||||
const uint64_t matmul_id_q5_0_f32_aligned_fp32_len = 11592;
|
||||
|
||||
extern unsigned char matmul_f16_aligned_data[11200];
|
||||
const uint64_t matmul_f16_aligned_len = 11200;
|
||||
|
||||
extern unsigned char matmul_f32_f32_aligned_data[11432];
|
||||
const uint64_t matmul_f32_f32_aligned_len = 11432;
|
||||
|
||||
extern unsigned char matmul_f16_f32_data[10332];
|
||||
const uint64_t matmul_f16_f32_len = 10332;
|
||||
|
||||
extern unsigned char matmul_f32_f32_data[10324];
|
||||
const uint64_t matmul_f32_f32_len = 10324;
|
||||
|
||||
extern unsigned char matmul_f16_f32_aligned_data[11360];
|
||||
const uint64_t matmul_f16_f32_aligned_len = 11360;
|
||||
|
||||
extern unsigned char matmul_q4_k_f32_data[12072];
|
||||
const uint64_t matmul_q4_k_f32_len = 12072;
|
||||
|
||||
extern unsigned char matmul_q4_0_f32_data[10564];
|
||||
const uint64_t matmul_q4_0_f32_len = 10564;
|
||||
|
||||
extern unsigned char matmul_q6_k_f32_aligned_data[12324];
|
||||
const uint64_t matmul_q6_k_f32_aligned_len = 12324;
|
||||
|
||||
extern unsigned char matmul_q4_1_f32_data[10632];
|
||||
const uint64_t matmul_q4_1_f32_len = 10632;
|
||||
|
||||
extern unsigned char matmul_q5_0_f32_data[11076];
|
||||
const uint64_t matmul_q5_0_f32_len = 11076;
|
||||
|
||||
extern unsigned char matmul_q4_1_f32_aligned_data[11224];
|
||||
const uint64_t matmul_q4_1_f32_aligned_len = 11224;
|
||||
extern unsigned char matmul_id_q5_k_f32_fp32_data[12948];
|
||||
const uint64_t matmul_id_q5_k_f32_fp32_len = 12948;
|
||||
|
||||
extern unsigned char matmul_id_q4_k_f32_aligned_fp32_data[12588];
|
||||
const uint64_t matmul_id_q4_k_f32_aligned_fp32_len = 12588;
|
||||
|
||||
extern unsigned char matmul_id_q4_k_f32_fp32_data[12424];
|
||||
const uint64_t matmul_id_q4_k_f32_fp32_len = 12424;
|
||||
|
||||
extern unsigned char matmul_id_q5_k_f32_aligned_fp32_data[13112];
|
||||
const uint64_t matmul_id_q5_k_f32_aligned_fp32_len = 13112;
|
||||
|
||||
extern unsigned char matmul_id_q6_k_f32_aligned_fp32_data[12280];
|
||||
const uint64_t matmul_id_q6_k_f32_aligned_fp32_len = 12280;
|
||||
|
||||
extern unsigned char matmul_f32_f16_data[10332];
|
||||
const uint64_t matmul_f32_f16_len = 10332;
|
||||
|
||||
extern unsigned char matmul_id_q6_k_f32_fp32_data[12116];
|
||||
const uint64_t matmul_id_q6_k_f32_fp32_len = 12116;
|
||||
|
||||
extern unsigned char matmul_id_iq4_nl_f32_fp32_data[11400];
|
||||
const uint64_t matmul_id_iq4_nl_f32_fp32_len = 11400;
|
||||
|
||||
extern unsigned char matmul_id_iq4_nl_f32_aligned_fp32_data[11580];
|
||||
const uint64_t matmul_id_iq4_nl_f32_aligned_fp32_len = 11580;
|
||||
|
||||
extern unsigned char matmul_f32_f16_aligned_data[11360];
|
||||
const uint64_t matmul_f32_f16_aligned_len = 11360;
|
||||
|
||||
extern unsigned char matmul_f16_aligned_data[11200];
|
||||
const uint64_t matmul_f16_aligned_len = 11200;
|
||||
|
||||
extern unsigned char matmul_f16_f32_data[10332];
|
||||
const uint64_t matmul_f16_f32_len = 10332;
|
||||
|
||||
extern unsigned char matmul_f32_f32_aligned_data[11432];
|
||||
const uint64_t matmul_f32_f32_aligned_len = 11432;
|
||||
|
||||
extern unsigned char matmul_f32_f32_data[10324];
|
||||
const uint64_t matmul_f32_f32_len = 10324;
|
||||
|
||||
extern unsigned char matmul_f16_data[10316];
|
||||
const uint64_t matmul_f16_len = 10316;
|
||||
|
||||
extern unsigned char matmul_q4_0_f32_aligned_data[11156];
|
||||
const uint64_t matmul_q4_0_f32_aligned_len = 11156;
|
||||
|
||||
extern unsigned char matmul_q4_0_f32_data[10564];
|
||||
const uint64_t matmul_q4_0_f32_len = 10564;
|
||||
|
||||
extern unsigned char matmul_f16_f32_aligned_data[11360];
|
||||
const uint64_t matmul_f16_f32_aligned_len = 11360;
|
||||
|
||||
extern unsigned char matmul_q5_0_f32_data[11076];
|
||||
const uint64_t matmul_q5_0_f32_len = 11076;
|
||||
|
||||
extern unsigned char matmul_q5_0_f32_aligned_data[11668];
|
||||
const uint64_t matmul_q5_0_f32_aligned_len = 11668;
|
||||
|
||||
extern unsigned char matmul_q8_0_f32_data[10608];
|
||||
const uint64_t matmul_q8_0_f32_len = 10608;
|
||||
|
||||
extern unsigned char matmul_q5_1_f32_aligned_data[11548];
|
||||
const uint64_t matmul_q5_1_f32_aligned_len = 11548;
|
||||
|
||||
extern unsigned char matmul_q8_0_f32_aligned_data[11200];
|
||||
const uint64_t matmul_q8_0_f32_aligned_len = 11200;
|
||||
|
||||
extern unsigned char matmul_q2_k_f32_data[11240];
|
||||
const uint64_t matmul_q2_k_f32_len = 11240;
|
||||
|
||||
extern unsigned char matmul_id_q5_k_f32_fp32_data[12948];
|
||||
const uint64_t matmul_id_q5_k_f32_fp32_len = 12948;
|
||||
extern unsigned char matmul_id_q2_k_f32_aligned_fp32_data[11756];
|
||||
const uint64_t matmul_id_q2_k_f32_aligned_fp32_len = 11756;
|
||||
|
||||
extern unsigned char matmul_q4_1_f32_data[10632];
|
||||
const uint64_t matmul_q4_1_f32_len = 10632;
|
||||
|
||||
extern unsigned char matmul_q4_1_f32_aligned_data[11224];
|
||||
const uint64_t matmul_q4_1_f32_aligned_len = 11224;
|
||||
|
||||
extern unsigned char matmul_q8_0_f32_aligned_data[11200];
|
||||
const uint64_t matmul_q8_0_f32_aligned_len = 11200;
|
||||
|
||||
extern unsigned char matmul_q5_1_f32_aligned_data[11548];
|
||||
const uint64_t matmul_q5_1_f32_aligned_len = 11548;
|
||||
|
||||
extern unsigned char matmul_q5_1_f32_data[10956];
|
||||
const uint64_t matmul_q5_1_f32_len = 10956;
|
||||
|
||||
extern unsigned char matmul_q2_k_f32_aligned_data[11800];
|
||||
const uint64_t matmul_q2_k_f32_aligned_len = 11800;
|
||||
|
@@ -243,173 +240,179 @@ const uint64_t matmul_q3_k_f32_len = 13144;
extern unsigned char matmul_q3_k_f32_aligned_data[13704];
const uint64_t matmul_q3_k_f32_aligned_len = 13704;
extern unsigned char matmul_f16_data[10316];
const uint64_t matmul_f16_len = 10316;
extern unsigned char matmul_q4_k_f32_aligned_data[12648];
const uint64_t matmul_q4_k_f32_aligned_len = 12648;
extern unsigned char matmul_q5_k_f32_data[12596];
const uint64_t matmul_q5_k_f32_len = 12596;
extern unsigned char matmul_q5_k_f32_aligned_data[13172];
const uint64_t matmul_q5_k_f32_aligned_len = 13172;
extern unsigned char matmul_q6_k_f32_data[11764];
const uint64_t matmul_q6_k_f32_len = 11764;
extern unsigned char matmul_q6_k_f32_aligned_data[12324];
const uint64_t matmul_q6_k_f32_aligned_len = 12324;
extern unsigned char matmul_iq4_nl_f32_data[11048];
const uint64_t matmul_iq4_nl_f32_len = 11048;
extern unsigned char matmul_q4_0_f32_aligned_data[11156];
const uint64_t matmul_q4_0_f32_aligned_len = 11156;
extern unsigned char matmul_iq4_nl_f32_aligned_data[11640];
const uint64_t matmul_iq4_nl_f32_aligned_len = 11640;
extern unsigned char matmul_id_f32_f16_data[10804];
const uint64_t matmul_id_f32_f16_len = 10804;
extern unsigned char matmul_id_f16_data[10788];
const uint64_t matmul_id_f16_len = 10788;
extern unsigned char matmul_id_f32_f16_aligned_data[12048];
const uint64_t matmul_id_f32_f16_aligned_len = 12048;
extern unsigned char matmul_id_q8_0_f32_data[11080];
const uint64_t matmul_id_q8_0_f32_len = 11080;
extern unsigned char matmul_id_f16_aligned_data[11888];
const uint64_t matmul_id_f16_aligned_len = 11888;
extern unsigned char matmul_id_f16_f32_data[10804];
const uint64_t matmul_id_f16_f32_len = 10804;
extern unsigned char matmul_id_f32_f32_aligned_data[12120];
const uint64_t matmul_id_f32_f32_aligned_len = 12120;
extern unsigned char matmul_id_q2_k_f32_aligned_data[12488];
const uint64_t matmul_id_q2_k_f32_aligned_len = 12488;
extern unsigned char matmul_id_q6_k_f32_aligned_data[13012];
const uint64_t matmul_id_q6_k_f32_aligned_len = 13012;
extern unsigned char matmul_id_q5_0_f32_aligned_data[12340];
const uint64_t matmul_id_q5_0_f32_aligned_len = 12340;
extern unsigned char matmul_id_q4_1_f32_data[11104];
const uint64_t matmul_id_q4_1_f32_len = 11104;
extern unsigned char matmul_id_q4_k_f32_data[12544];
const uint64_t matmul_id_q4_k_f32_len = 12544;
extern unsigned char matmul_id_q4_1_f32_aligned_data[11912];
const uint64_t matmul_id_q4_1_f32_aligned_len = 11912;
extern unsigned char matmul_id_f32_f32_data[10796];
const uint64_t matmul_id_f32_f32_len = 10796;
extern unsigned char matmul_id_q5_1_f32_data[11428];
const uint64_t matmul_id_q5_1_f32_len = 11428;
extern unsigned char matmul_id_q5_1_f32_aligned_data[12236];
const uint64_t matmul_id_q5_1_f32_aligned_len = 12236;
extern unsigned char matmul_id_q3_k_f32_aligned_data[14392];
const uint64_t matmul_id_q3_k_f32_aligned_len = 14392;
extern unsigned char matmul_id_q2_k_f32_data[11712];
const uint64_t matmul_id_q2_k_f32_len = 11712;
extern unsigned char matmul_id_f16_f32_aligned_data[12048];
const uint64_t matmul_id_f16_f32_aligned_len = 12048;
extern unsigned char matmul_id_q4_0_f32_data[11036];
const uint64_t matmul_id_q4_0_f32_len = 11036;
extern unsigned char matmul_id_q4_k_f32_aligned_data[13336];
const uint64_t matmul_id_q4_k_f32_aligned_len = 13336;
extern unsigned char matmul_id_q4_0_f32_aligned_data[11844];
const uint64_t matmul_id_q4_0_f32_aligned_len = 11844;
extern unsigned char matmul_id_q5_k_f32_aligned_data[13860];
const uint64_t matmul_id_q5_k_f32_aligned_len = 13860;
extern unsigned char matmul_id_q8_0_f32_aligned_data[11888];
const uint64_t matmul_id_q8_0_f32_aligned_len = 11888;
extern unsigned char get_rows_f32_f32_data[3000];
const uint64_t get_rows_f32_f32_len = 3000;
extern unsigned char matmul_id_q5_0_f32_data[11532];
const uint64_t matmul_id_q5_0_f32_len = 11532;
extern unsigned char mul_mat_vec_f32_f32_f32_data[3760];
const uint64_t mul_mat_vec_f32_f32_f32_len = 3760;
extern unsigned char matmul_id_iq4_nl_f32_data[11520];
const uint64_t matmul_id_iq4_nl_f32_len = 11520;
extern unsigned char mul_mat_vec_f32_f16_f32_data[3828];
const uint64_t mul_mat_vec_f32_f16_f32_len = 3828;
extern unsigned char matmul_id_iq4_nl_f32_aligned_data[12328];
const uint64_t matmul_id_iq4_nl_f32_aligned_len = 12328;
extern unsigned char matmul_id_q3_k_f32_data[13616];
const uint64_t matmul_id_q3_k_f32_len = 13616;
extern unsigned char matmul_id_q5_k_f32_data[13068];
const uint64_t matmul_id_q5_k_f32_len = 13068;
extern unsigned char mul_mat_vec_id_f32_f32_data[3684];
const uint64_t mul_mat_vec_id_f32_f32_len = 3684;
extern unsigned char dequant_f32_data[3200];
const uint64_t dequant_f32_len = 3200;
extern unsigned char get_rows_f32_data[3052];
const uint64_t get_rows_f32_len = 3052;
extern unsigned char matmul_id_q6_k_f32_data[12236];
const uint64_t matmul_id_q6_k_f32_len = 12236;
extern unsigned char mul_mat_vec_f16_f32_f32_data[3828];
const uint64_t mul_mat_vec_f16_f32_f32_len = 3828;
extern unsigned char mul_mat_vec_f16_f16_f32_data[3860];
const uint64_t mul_mat_vec_f16_f16_f32_len = 3860;
extern unsigned char mul_mat_vec_f32_f32_f32_data[3752];
const uint64_t mul_mat_vec_f32_f32_f32_len = 3752;
extern unsigned char mul_mat_vec_f32_f16_f32_data[3820];
const uint64_t mul_mat_vec_f32_f16_f32_len = 3820;
extern unsigned char mul_mat_vec_id_f32_f32_data[3676];
const uint64_t mul_mat_vec_id_f32_f32_len = 3676;
extern unsigned char mul_mat_vec_f16_f32_f32_data[3820];
const uint64_t mul_mat_vec_f16_f32_f32_len = 3820;
extern unsigned char mul_mat_vec_f16_f16_f32_data[3852];
const uint64_t mul_mat_vec_f16_f16_f32_len = 3852;
extern unsigned char get_rows_f16_data[3020];
const uint64_t get_rows_f16_len = 3020;
extern unsigned char mul_mat_vec_q4_0_f16_f32_data[4252];
const uint64_t mul_mat_vec_q4_0_f16_f32_len = 4252;
extern unsigned char mul_mat_vec_id_f16_f32_data[3744];
const uint64_t mul_mat_vec_id_f16_f32_len = 3744;
extern unsigned char mul_mat_vec_id_q4_0_f32_data[4144];
const uint64_t mul_mat_vec_id_q4_0_f32_len = 4144;
extern unsigned char mul_mat_vec_id_f16_f32_data[3752];
const uint64_t mul_mat_vec_id_f16_f32_len = 3752;
extern unsigned char mul_mat_vec_q4_0_f32_f32_data[4220];
const uint64_t mul_mat_vec_q4_0_f32_f32_len = 4220;
extern unsigned char matmul_id_f16_aligned_fp32_data[10976];
const uint64_t matmul_id_f16_aligned_fp32_len = 10976;
extern unsigned char get_rows_f16_f32_data[3052];
const uint64_t get_rows_f16_f32_len = 3052;
extern unsigned char dequant_q4_0_data[5292];
const uint64_t dequant_q4_0_len = 5292;
extern unsigned char mul_mat_vec_q4_0_f16_f32_data[4244];
const uint64_t mul_mat_vec_q4_0_f16_f32_len = 4244;
extern unsigned char mul_mat_vec_q4_0_f32_f32_data[4212];
const uint64_t mul_mat_vec_q4_0_f32_f32_len = 4212;
extern unsigned char mul_mat_vec_id_q4_0_f32_data[4136];
const uint64_t mul_mat_vec_id_q4_0_f32_len = 4136;
extern unsigned char get_rows_q4_0_data[3728];
const uint64_t get_rows_q4_0_len = 3728;
extern unsigned char get_rows_q4_0_f32_data[3712];
const uint64_t get_rows_q4_0_f32_len = 3712;
extern unsigned char mul_mat_vec_q4_1_f32_f32_data[4304];
const uint64_t mul_mat_vec_q4_1_f32_f32_len = 4304;
extern unsigned char mul_mat_vec_q4_1_f16_f32_data[4328];
const uint64_t mul_mat_vec_q4_1_f16_f32_len = 4328;
extern unsigned char mul_mat_vec_q4_1_f16_f32_data[4336];
const uint64_t mul_mat_vec_q4_1_f16_f32_len = 4336;
extern unsigned char mul_mat_vec_id_q4_1_f32_data[4204];
const uint64_t mul_mat_vec_id_q4_1_f32_len = 4204;
extern unsigned char mul_mat_vec_id_q4_1_f32_data[4212];
const uint64_t mul_mat_vec_id_q4_1_f32_len = 4212;
extern unsigned char mul_mat_vec_q4_1_f32_f32_data[4296];
const uint64_t mul_mat_vec_q4_1_f32_f32_len = 4296;
extern unsigned char dequant_q4_1_data[5248];
const uint64_t dequant_q4_1_len = 5248;
@@ -417,152 +420,125 @@ const uint64_t dequant_q4_1_len = 5248;
extern unsigned char get_rows_q4_1_data[3812];
const uint64_t get_rows_q4_1_len = 3812;
extern unsigned char mul_mat_vec_q5_0_f32_f32_data[4748];
const uint64_t mul_mat_vec_q5_0_f32_f32_len = 4748;
extern unsigned char mul_mat_vec_q5_0_f16_f32_data[4780];
const uint64_t mul_mat_vec_q5_0_f16_f32_len = 4780;
extern unsigned char mul_mat_vec_id_q5_0_f32_data[4656];
const uint64_t mul_mat_vec_id_q5_0_f32_len = 4656;
extern unsigned char get_rows_q4_1_f32_data[3796];
const uint64_t get_rows_q4_1_f32_len = 3796;
extern unsigned char dequant_q5_0_data[6644];
const uint64_t dequant_q5_0_len = 6644;
extern unsigned char mul_mat_vec_q5_0_f32_f32_data[4740];
const uint64_t mul_mat_vec_q5_0_f32_f32_len = 4740;
extern unsigned char mul_mat_vec_q5_0_f16_f32_data[4772];
const uint64_t mul_mat_vec_q5_0_f16_f32_len = 4772;
extern unsigned char mul_mat_vec_id_q5_0_f32_data[4648];
const uint64_t mul_mat_vec_id_q5_0_f32_len = 4648;
extern unsigned char get_rows_q5_0_data[4256];
const uint64_t get_rows_q5_0_len = 4256;
extern unsigned char mul_mat_vec_q5_1_f32_f32_data[4644];
const uint64_t mul_mat_vec_q5_1_f32_f32_len = 4644;
extern unsigned char mul_mat_vec_q5_1_f16_f32_data[4668];
const uint64_t mul_mat_vec_q5_1_f16_f32_len = 4668;
extern unsigned char get_rows_q5_0_f32_data[4240];
const uint64_t get_rows_q5_0_f32_len = 4240;
extern unsigned char mul_mat_vec_q5_1_f16_f32_data[4676];
const uint64_t mul_mat_vec_q5_1_f16_f32_len = 4676;
extern unsigned char mul_mat_vec_id_q5_1_f32_data[4536];
const uint64_t mul_mat_vec_id_q5_1_f32_len = 4536;
extern unsigned char mul_mat_vec_id_q5_1_f32_data[4528];
const uint64_t mul_mat_vec_id_q5_1_f32_len = 4528;
extern unsigned char dequant_q5_1_data[6412];
const uint64_t dequant_q5_1_len = 6412;
extern unsigned char dequant_q8_0_data[4780];
const uint64_t dequant_q8_0_len = 4780;
extern unsigned char get_rows_q5_1_data[4152];
const uint64_t get_rows_q5_1_len = 4152;
extern unsigned char mul_mat_vec_q8_0_f16_f32_data[4208];
const uint64_t mul_mat_vec_q8_0_f16_f32_len = 4208;
extern unsigned char get_rows_q5_1_f32_data[4136];
const uint64_t get_rows_q5_1_f32_len = 4136;
extern unsigned char mul_mat_vec_id_q8_0_f32_data[4100];
const uint64_t mul_mat_vec_id_q8_0_f32_len = 4100;
extern unsigned char mul_mat_vec_id_q8_0_f32_data[4092];
const uint64_t mul_mat_vec_id_q8_0_f32_len = 4092;
extern unsigned char mul_mat_vec_q8_0_f16_f32_data[4200];
const uint64_t mul_mat_vec_q8_0_f16_f32_len = 4200;
extern unsigned char get_rows_q8_0_data[3668];
const uint64_t get_rows_q8_0_len = 3668;
extern unsigned char mul_mat_vec_q8_0_f32_f32_data[4176];
const uint64_t mul_mat_vec_q8_0_f32_f32_len = 4176;
extern unsigned char mul_mat_vec_q2_k_f32_f32_data[8504];
const uint64_t mul_mat_vec_q2_k_f32_f32_len = 8504;
extern unsigned char mul_mat_vec_q3_k_f16_f32_data[10176];
const uint64_t mul_mat_vec_q3_k_f16_f32_len = 10176;
extern unsigned char get_rows_q8_0_f32_data[3652];
const uint64_t get_rows_q8_0_f32_len = 3652;
extern unsigned char mul_mat_vec_id_q2_k_f32_data[8412];
const uint64_t mul_mat_vec_id_q2_k_f32_len = 8412;
extern unsigned char mul_mat_vec_q2_k_f32_f32_data[8480];
const uint64_t mul_mat_vec_q2_k_f32_f32_len = 8480;
extern unsigned char mul_mat_vec_q2_k_f16_f32_data[8736];
const uint64_t mul_mat_vec_q2_k_f16_f32_len = 8736;
extern unsigned char mul_mat_vec_id_q2_k_f32_data[8388];
const uint64_t mul_mat_vec_id_q2_k_f32_len = 8388;
extern unsigned char dequant_q2_k_data[4032];
const uint64_t dequant_q2_k_len = 4032;
extern unsigned char mul_mat_vec_q3_k_f32_f32_data[10040];
const uint64_t mul_mat_vec_q3_k_f32_f32_len = 10040;
extern unsigned char dequant_q3_k_data[4804];
const uint64_t dequant_q3_k_len = 4804;
extern unsigned char mul_mat_vec_q3_k_f32_f32_data[10048];
const uint64_t mul_mat_vec_q3_k_f32_f32_len = 10048;
extern unsigned char mul_mat_vec_q4_k_f32_f32_data[10028];
const uint64_t mul_mat_vec_q4_k_f32_f32_len = 10028;
extern unsigned char mul_mat_vec_id_q3_k_f32_data[10020];
const uint64_t mul_mat_vec_id_q3_k_f32_len = 10020;
extern unsigned char mul_mat_vec_id_q3_k_f32_data[10012];
const uint64_t mul_mat_vec_id_q3_k_f32_len = 10012;
extern unsigned char matmul_q5_1_f32_fp32_data[10836];
const uint64_t matmul_q5_1_f32_fp32_len = 10836;
extern unsigned char mul_mat_vec_q3_k_f16_f32_data[10168];
const uint64_t mul_mat_vec_q3_k_f16_f32_len = 10168;
extern unsigned char mul_mat_vec_q4_k_f16_f32_data[10548];
const uint64_t mul_mat_vec_q4_k_f16_f32_len = 10548;
extern unsigned char mul_mat_vec_q4_k_f16_f32_data[10540];
const uint64_t mul_mat_vec_q4_k_f16_f32_len = 10540;
extern unsigned char mul_mat_vec_id_q4_k_f32_data[9936];
const uint64_t mul_mat_vec_id_q4_k_f32_len = 9936;
extern unsigned char dequant_q4_k_data[5916];
const uint64_t dequant_q4_k_len = 5916;
extern unsigned char mul_mat_vec_id_q4_k_f32_data[9944];
const uint64_t mul_mat_vec_id_q4_k_f32_len = 9944;
extern unsigned char mul_mat_vec_q5_k_f32_f32_data[12916];
const uint64_t mul_mat_vec_q5_k_f32_f32_len = 12916;
extern unsigned char mul_mat_vec_q5_k_f16_f32_data[13436];
const uint64_t mul_mat_vec_q5_k_f16_f32_len = 13436;
extern unsigned char mul_mat_vec_q5_k_f32_f32_data[12924];
const uint64_t mul_mat_vec_q5_k_f32_f32_len = 12924;
extern unsigned char mul_mat_vec_id_q5_k_f32_data[12816];
const uint64_t mul_mat_vec_id_q5_k_f32_len = 12816;
extern unsigned char mul_mat_vec_q6_k_f32_f32_data[11852];
const uint64_t mul_mat_vec_q6_k_f32_f32_len = 11852;
extern unsigned char mul_mat_vec_q6_k_f16_f32_data[12108];
const uint64_t mul_mat_vec_q6_k_f16_f32_len = 12108;
extern unsigned char mul_mat_vec_id_q5_k_f32_data[12808];
const uint64_t mul_mat_vec_id_q5_k_f32_len = 12808;
extern unsigned char dequant_q5_k_data[5980];
const uint64_t dequant_q5_k_len = 5980;
extern unsigned char dequant_q6_k_data[4272];
const uint64_t dequant_q6_k_len = 4272;
extern unsigned char mul_mat_vec_q6_k_f32_f32_data[11840];
const uint64_t mul_mat_vec_q6_k_f32_f32_len = 11840;
extern unsigned char mul_mat_vec_id_q6_k_f32_data[11760];
const uint64_t mul_mat_vec_id_q6_k_f32_len = 11760;
extern unsigned char mul_mat_vec_q6_k_f16_f32_data[12096];
const uint64_t mul_mat_vec_q6_k_f16_f32_len = 12096;
extern unsigned char mul_mat_vec_iq4_nl_f32_f32_data[4728];
const uint64_t mul_mat_vec_iq4_nl_f32_f32_len = 4728;
extern unsigned char mul_mat_vec_id_iq4_nl_f32_data[4644];
const uint64_t mul_mat_vec_id_iq4_nl_f32_len = 4644;
extern unsigned char mul_mat_vec_iq4_nl_f16_f32_data[4760];
const uint64_t mul_mat_vec_iq4_nl_f16_f32_len = 4760;
extern unsigned char mul_mat_vec_iq4_nl_f16_f32_data[4752];
const uint64_t mul_mat_vec_iq4_nl_f16_f32_len = 4752;
extern unsigned char get_rows_iq4_nl_data[4236];
const uint64_t get_rows_iq4_nl_len = 4236;
extern unsigned char dequant_iq4_nl_data[5992];
const uint64_t dequant_iq4_nl_len = 5992;
extern unsigned char get_rows_iq4_nl_f32_data[4220];
const uint64_t get_rows_iq4_nl_f32_len = 4220;
extern unsigned char mul_mat_vec_id_iq4_nl_f32_data[4652];
const uint64_t mul_mat_vec_id_iq4_nl_f32_len = 4652;
extern unsigned char mul_mat_vec_p021_f16_f32_data[2768];
const uint64_t mul_mat_vec_p021_f16_f32_len = 2768;
extern unsigned char mul_mat_vec_q2_k_f16_f32_data[8760];
const uint64_t mul_mat_vec_q2_k_f16_f32_len = 8760;
extern unsigned char mul_mat_vec_nc_f16_f32_data[2824];
const uint64_t mul_mat_vec_nc_f16_f32_len = 2824;
extern unsigned char mul_mat_vec_q5_k_f16_f32_data[13428];
const uint64_t mul_mat_vec_q5_k_f16_f32_len = 13428;
extern unsigned char norm_f32_data[2792];
const uint64_t norm_f32_len = 2792;
extern unsigned char group_norm_f32_data[3028];
const uint64_t group_norm_f32_len = 3028;
extern unsigned char mul_mat_vec_q5_1_f32_f32_data[4636];
const uint64_t mul_mat_vec_q5_1_f32_f32_len = 4636;
extern unsigned char cpy_f32_f32_data[3416];
const uint64_t cpy_f32_f32_len = 3416;
@@ -570,6 +546,9 @@ const uint64_t cpy_f32_f32_len = 3416;
extern unsigned char rms_norm_f32_data[2512];
const uint64_t rms_norm_f32_len = 2512;
extern unsigned char group_norm_f32_data[3028];
const uint64_t group_norm_f32_len = 3028;
extern unsigned char cpy_f32_f16_data[3468];
const uint64_t cpy_f32_f16_len = 3468;
@@ -582,15 +561,18 @@ const uint64_t add_f32_len = 4484;
extern unsigned char add_f16_f32_f16_data[4552];
const uint64_t add_f16_f32_f16_len = 4552;
extern unsigned char div_f32_data[4484];
const uint64_t div_f32_len = 4484;
extern unsigned char mul_f32_data[4484];
const uint64_t mul_f32_len = 4484;
extern unsigned char repeat_f32_data[3248];
const uint64_t repeat_f32_len = 3248;
extern unsigned char split_k_reduce_data[1416];
const uint64_t split_k_reduce_len = 1416;
extern unsigned char scale_f32_data[3504];
const uint64_t scale_f32_len = 3504;
@@ -606,45 +588,42 @@ const uint64_t sqr_f32_len = 3436;
extern unsigned char clamp_f32_data[3696];
const uint64_t clamp_f32_len = 3696;
extern unsigned char concat_f16_data[5308];
const uint64_t concat_f16_len = 5308;
extern unsigned char concat_f16_data[5364];
const uint64_t concat_f16_len = 5364;
extern unsigned char concat_i32_data[5280];
const uint64_t concat_i32_len = 5280;
extern unsigned char gelu_quick_f32_data[1488];
const uint64_t gelu_quick_f32_len = 1488;
extern unsigned char upscale_f32_data[2832];
const uint64_t upscale_f32_len = 2832;
extern unsigned char gelu_f32_data[1668];
const uint64_t gelu_f32_len = 1668;
extern unsigned char relu_f32_data[1396];
const uint64_t relu_f32_len = 1396;
extern unsigned char silu_f32_data[1448];
const uint64_t silu_f32_len = 1448;
extern unsigned char leaky_relu_f32_data[1524];
const uint64_t leaky_relu_f32_len = 1524;
extern unsigned char tanh_f32_data[1376];
const uint64_t tanh_f32_len = 1376;
extern unsigned char soft_max_f32_data[5004];
const uint64_t soft_max_f32_len = 5004;
extern unsigned char diag_mask_inf_f32_data[1504];
const uint64_t diag_mask_inf_f32_len = 1504;
extern unsigned char soft_max_f32_f16_data[5072];
const uint64_t soft_max_f32_f16_len = 5072;
extern unsigned char rope_norm_f32_data[3852];
const uint64_t rope_norm_f32_len = 3852;
extern unsigned char rope_norm_f16_data[3952];
const uint64_t rope_norm_f16_len = 3952;
@@ -657,24 +636,48 @@ const uint64_t rope_neox_f16_len = 3952;
extern unsigned char sum_rows_f32_data[2296];
const uint64_t sum_rows_f32_len = 2296;
extern unsigned char im2col_f32_data[3648];
const uint64_t im2col_f32_len = 3648;
extern unsigned char argsort_f32_data[4096];
const uint64_t argsort_f32_len = 4096;
extern unsigned char im2col_f32_f16_data[3716];
const uint64_t im2col_f32_f16_len = 3716;
extern unsigned char mul_mat_vec_iq4_nl_f32_f32_data[4720];
const uint64_t mul_mat_vec_iq4_nl_f32_f32_len = 4720;
extern unsigned char timestep_embedding_f32_data[1976];
const uint64_t timestep_embedding_f32_len = 1976;
extern unsigned char matmul_id_q3_k_f32_data[13616];
const uint64_t matmul_id_q3_k_f32_len = 13616;
extern unsigned char matmul_q4_1_f32_aligned_fp32_data[10476];
const uint64_t matmul_q4_1_f32_aligned_fp32_len = 10476;
extern unsigned char matmul_id_f16_data[10788];
const uint64_t matmul_id_f16_len = 10788;
extern unsigned char mul_mat_vec_nc_f16_f32_data[2824];
const uint64_t mul_mat_vec_nc_f16_f32_len = 2824;
extern unsigned char dequant_q2_k_data[4032];
const uint64_t dequant_q2_k_len = 4032;
extern unsigned char get_rows_iq4_nl_data[4236];
const uint64_t get_rows_iq4_nl_len = 4236;
extern unsigned char mul_mat_vec_p021_f16_f32_data[2768];
const uint64_t mul_mat_vec_p021_f16_f32_len = 2768;
extern unsigned char get_rows_iq4_nl_f32_data[4220];
const uint64_t get_rows_iq4_nl_f32_len = 4220;
extern unsigned char dequant_iq4_nl_data[5992];
const uint64_t dequant_iq4_nl_len = 5992;
extern unsigned char mul_mat_vec_q8_0_f32_f32_data[4168];
const uint64_t mul_mat_vec_q8_0_f32_f32_len = 4168;
extern unsigned char matmul_q4_k_f32_data[12072];
const uint64_t matmul_q4_k_f32_len = 12072;
extern unsigned char mul_mat_vec_id_q6_k_f32_data[11748];
const uint64_t mul_mat_vec_id_q6_k_f32_len = 11748;
extern unsigned char soft_max_f32_f16_data[5072];
const uint64_t soft_max_f32_f16_len = 5072;
File diff suppressed because it is too large
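Every pair above follows the same pattern: a compiled SPIR-V shader embedded as a byte array plus a length constant. A minimal sketch of how such a blob is typically handed to Vulkan; the device handle and the choice of blob are illustrative assumptions, not code from this commit:

#include <cstdint>
#include <cstring>
#include <vector>
#include <vulkan/vulkan.h>

// Hypothetical helper: wrap one embedded (data, len) pair in a VkShaderModule.
// pCode must be 4-byte aligned, so the bytes are copied into a uint32_t buffer.
static VkShaderModule create_module(VkDevice device, const unsigned char * data, uint64_t len) {
    std::vector<uint32_t> code((len + 3) / 4);
    std::memcpy(code.data(), data, len);

    VkShaderModuleCreateInfo info = {};
    info.sType    = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
    info.codeSize = static_cast<size_t>(len);  // size in bytes
    info.pCode    = code.data();

    VkShaderModule module = VK_NULL_HANDLE;
    vkCreateShaderModule(device, &info, nullptr, &module);
    return module;
}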
@@ -30,6 +30,10 @@ void main() {
 #ifndef OPTIMIZATION_ERROR_WORKAROUND
     data_d[p.d_offset + dst_idx] = D_TYPE(is_src0 ? data_a[src0_idx] : data_b[src1_idx]);
 #else
-    data_d[p.d_offset + dst_idx] = is_src0 ? data_a[src0_idx] : data_b[src1_idx];
+    if (is_src0) {
+        data_d[p.d_offset + dst_idx] = data_a[src0_idx];
+    } else {
+        data_d[p.d_offset + dst_idx] = data_b[src1_idx];
+    }
 #endif
 }
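The hunk above expands a ternary select into an explicit branch, but only under the workaround macro; the macro's name suggests this path exists for drivers that miscompile the select. A C++ analogue of the two forms, with illustrative names, showing they compute the same value:

#include <cassert>
#include <cstddef>

// Select form: mirrors the non-workaround shader path.
float copy_select(bool is_src0, const float * src0, const float * src1, size_t i) {
    return is_src0 ? src0[i] : src1[i];
}

// Branch form: mirrors the workaround path introduced by the hunk.
float copy_branch(bool is_src0, const float * src0, const float * src1, size_t i) {
    if (is_src0) {
        return src0[i];
    }
    return src1[i];
}

int main() {
    const float a[1] = {1.0f}, b[1] = {2.0f};
    assert(copy_select(true, a, b, 0) == copy_branch(true, a, b, 0));
    assert(copy_select(false, a, b, 0) == copy_branch(false, a, b, 0));
    return 0;
}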
@@ -39,8 +39,7 @@ void main() {
         vec2 v = dequantize(ib, iqs, a_offset / QUANT_K);

         // matrix multiplication
-        tmp[tid] += FLOAT_TYPE(v.x) * FLOAT_TYPE(data_b[b_offset + iybs + iqs]) +
-                    FLOAT_TYPE(v.y) * FLOAT_TYPE(data_b[b_offset + iybs + iqs + y_offset]);
+        tmp[tid] = fma(FLOAT_TYPE(v.x), FLOAT_TYPE(data_b[b_offset + iybs + iqs]), fma(FLOAT_TYPE(v.y), FLOAT_TYPE(data_b[b_offset + iybs + iqs + y_offset]), tmp[tid]));
     }

     // sum up partial sums and write back result
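This is the rewrite applied throughout the shader hunks that follow: an accumulation of the form acc += a*b + c*d becomes nested fused multiply-adds, which round once per fma and map directly to hardware FMA units. A runnable C++ sketch of the same transformation using std::fma, with made-up values:

#include <cmath>
#include <cstdio>

int main() {
    const float vx = 0.5f, vy = 0.25f, b0 = 3.0f, b1 = 5.0f;
    float tmp = 1.0f;

    // Before: separate multiplies and adds (each intermediate result rounds).
    float before = tmp + vx * b0 + vy * b1;

    // After: the pattern from the hunk -- fold the running accumulator into
    // nested fused multiply-adds, innermost first.
    float after = std::fma(vx, b0, std::fma(vy, b1, tmp));

    std::printf("%f %f\n", before, after);  // equal up to rounding
    return 0;
}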
@@ -53,7 +53,7 @@ void main() {

         const FLOAT_TYPE xi = FLOAT_TYPE(data_a[ix]);

-        tmp[tid] += xi * FLOAT_TYPE(data_b[iy]);
+        tmp[tid] = fma(xi, FLOAT_TYPE(data_b[iy]), tmp[tid]);
     }

     // sum up partial sums and write back result
@@ -52,7 +52,7 @@ void main() {
         // y is not transposed but permuted
         const uint iy = channel*nrows_y + row_y;

-        tmp[tid] += xi * FLOAT_TYPE(data_b[iy]);
+        tmp[tid] = fma(xi, FLOAT_TYPE(data_b[iy]), tmp[tid]);
     }

     // dst is not transposed and not permuted
@@ -39,24 +39,25 @@ void main() {
         FLOAT_TYPE sum1 = FLOAT_TYPE(0.0);
         FLOAT_TYPE sum2 = FLOAT_TYPE(0.0);
         for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
-            sum1 += FLOAT_TYPE(data_b[b_offset + y_idx + l +  0]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 0] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l + 0] >> 0) & 3)
-                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 16]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 1] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l +16] >> 0) & 3)
-                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 32]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 2] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l + 0] >> 2) & 3)
-                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 48]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 3] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l +16] >> 2) & 3)
-                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 64]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 4] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l + 0] >> 4) & 3)
-                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 80]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 5] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l +16] >> 4) & 3)
-                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 96]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 6] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l + 0] >> 6) & 3)
-                  + FLOAT_TYPE(data_b[b_offset + y_idx + l +112]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 7] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l +16] >> 6) & 3);
-            sum2 += FLOAT_TYPE(data_b[b_offset + y_idx + l +  0]) * FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 0] >> 4) & 0xF)
-                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 16]) * FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 1] >> 4) & 0xF)
-                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 32]) * FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 2] >> 4) & 0xF)
-                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 48]) * FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 3] >> 4) & 0xF)
-                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 64]) * FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 4] >> 4) & 0xF)
-                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 80]) * FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 5] >> 4) & 0xF)
-                  + FLOAT_TYPE(data_b[b_offset + y_idx + l + 96]) * FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 6] >> 4) & 0xF)
-                  + FLOAT_TYPE(data_b[b_offset + y_idx + l +112]) * FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 7] >> 4) & 0xF);
+            sum1 = fma(FLOAT_TYPE(data_b[b_offset + y_idx + l +  0]), FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 0] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l + 0] >> 0) & 3),
+                   fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 16]), FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 1] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l +16] >> 0) & 3),
+                   fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 32]), FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 2] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l + 0] >> 2) & 3),
+                   fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 48]), FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 3] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l +16] >> 2) & 3),
+                   fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 64]), FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 4] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l + 0] >> 4) & 3),
+                   fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 80]), FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 5] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l +16] >> 4) & 3),
+                   fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 96]), FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 6] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l + 0] >> 6) & 3),
+                   fma(FLOAT_TYPE(data_b[b_offset + y_idx + l +112]), FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 7] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l +16] >> 6) & 3), sum1))))))));
+            sum2 = fma(FLOAT_TYPE(data_b[b_offset + y_idx + l +  0]), FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 0] >> 4) & 0xF),
+                   fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 16]), FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 1] >> 4) & 0xF),
+                   fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 32]), FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 2] >> 4) & 0xF),
+                   fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 48]), FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 3] >> 4) & 0xF),
+                   fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 64]), FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 4] >> 4) & 0xF),
+                   fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 80]), FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 5] >> 4) & 0xF),
+                   fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 96]), FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 6] >> 4) & 0xF),
+                   fma(FLOAT_TYPE(data_b[b_offset + y_idx + l +112]), FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 7] >> 4) & 0xF), sum2))))))));
         }
-        tmp[16 * ix + tid] += dall * sum1 - dmin * sum2;
+        const uint tmp_idx = 16 * ix + tid;
+        tmp[tmp_idx] = fma(dall, sum1, fma(-dmin, sum2, tmp[tmp_idx]));
     }

     // sum up partial sums and write back result
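Note the final accumulation: tmp += dall * sum1 - dmin * sum2 becomes fma(dall, sum1, fma(-dmin, sum2, tmp)). The subtraction is absorbed by negating one factor, so the whole update is two fused operations. A worked check in C++ with values chosen so both forms are exact:

#include <cassert>
#include <cmath>

int main() {
    double tmp = 0.125, dall = 2.0, sum1 = 3.0, dmin = 0.5, sum2 = 4.0;

    // Original form: multiply, multiply, add, subtract.
    double expected = tmp + dall * sum1 - dmin * sum2;

    // Fused form from the hunk: negate dmin so the subtraction becomes an
    // add, then chain the two multiply-adds through the accumulator.
    double fused = std::fma(dall, sum1, std::fma(-dmin, sum2, tmp));

    assert(fused == expected);  // exact for these values; in general fma rounds less
    return 0;
}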
@@ -40,16 +40,17 @@ void main() {

         FLOAT_TYPE sum = FLOAT_TYPE(0.0);
         for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
-            sum += FLOAT_TYPE(data_b[b_offset + y_idx + l +  0]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[0] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[ 8] >> (s_shift + 0) & 0x3) << 4)) - 32) * FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l   ]     ) & 3) - (((data_a[ib0 + i].hmask[l0 + l   ] & (m << 0)) != 0) ? 0 : 4))
-                 + FLOAT_TYPE(data_b[b_offset + y_idx + l + 32]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[2] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[10] >> (s_shift + 0) & 0x3) << 4)) - 32) * FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l   ] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l   ] & (m << 1)) != 0) ? 0 : 4))
-                 + FLOAT_TYPE(data_b[b_offset + y_idx + l + 64]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[4] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[ 8] >> (s_shift + 2) & 0x3) << 4)) - 32) * FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l   ] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l   ] & (m << 2)) != 0) ? 0 : 4))
-                 + FLOAT_TYPE(data_b[b_offset + y_idx + l + 96]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[6] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[10] >> (s_shift + 2) & 0x3) << 4)) - 32) * FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l   ] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l   ] & (m << 3)) != 0) ? 0 : 4))
-                 + FLOAT_TYPE(data_b[b_offset + y_idx + l + 16]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[1] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[ 9] >> (s_shift + 0) & 0x3) << 4)) - 32) * FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16]     ) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 0)) != 0) ? 0 : 4))
-                 + FLOAT_TYPE(data_b[b_offset + y_idx + l + 48]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[3] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[11] >> (s_shift + 0) & 0x3) << 4)) - 32) * FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 1)) != 0) ? 0 : 4))
-                 + FLOAT_TYPE(data_b[b_offset + y_idx + l + 80]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[5] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[ 9] >> (s_shift + 2) & 0x3) << 4)) - 32) * FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 2)) != 0) ? 0 : 4))
-                 + FLOAT_TYPE(data_b[b_offset + y_idx + l +112]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[7] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[11] >> (s_shift + 2) & 0x3) << 4)) - 32) * FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 3)) != 0) ? 0 : 4));
+            sum = fma(FLOAT_TYPE(data_b[b_offset + y_idx + l +  0]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[0] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[ 8] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l   ]     ) & 3) - (((data_a[ib0 + i].hmask[l0 + l   ] & (m << 0)) != 0) ? 0 : 4)),
+                  fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 32]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[2] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[10] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l   ] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l   ] & (m << 1)) != 0) ? 0 : 4)),
+                  fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 64]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[4] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[ 8] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l   ] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l   ] & (m << 2)) != 0) ? 0 : 4)),
+                  fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 96]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[6] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[10] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l   ] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l   ] & (m << 3)) != 0) ? 0 : 4)),
+                  fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 16]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[1] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[ 9] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16]     ) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 0)) != 0) ? 0 : 4)),
+                  fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 48]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[3] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[11] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 1)) != 0) ? 0 : 4)),
+                  fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 80]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[5] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[ 9] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 2)) != 0) ? 0 : 4)),
+                  fma(FLOAT_TYPE(data_b[b_offset + y_idx + l +112]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[7] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[11] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 3)) != 0) ? 0 : 4)), sum))))))));
         }
-        tmp[16 * ix + tid] += d * sum;
+        const uint tmp_idx = 16 * ix + tid;
+        tmp[tmp_idx] = fma(d, sum, tmp[tmp_idx]);
     }

     // sum up partial sums and write back result
@@ -67,17 +67,17 @@ void main() {
         const uint8_t q4_14 = uint8_t(data_a[ib0 + i].qs[q_offset + 66] >> 4);
         const uint8_t q4_15 = uint8_t(data_a[ib0 + i].qs[q_offset + 67] >> 4);

-        const FLOAT_TYPE sx = FLOAT_TYPE(FLOAT_TYPE(data_b[b_offset + y1_idx]) * q4_0 + FLOAT_TYPE(data_b[b_offset + y1_idx + 1]) * q4_1 + FLOAT_TYPE(data_b[b_offset + y1_idx + 2]) * q4_2 + FLOAT_TYPE(data_b[b_offset + y1_idx + 3]) * q4_3);
-        const FLOAT_TYPE sy = FLOAT_TYPE(FLOAT_TYPE(data_b[b_offset + y1_idx + 32]) * q4_4 + FLOAT_TYPE(data_b[b_offset + y1_idx + 33]) * q4_5 + FLOAT_TYPE(data_b[b_offset + y1_idx + 34]) * q4_6 + FLOAT_TYPE(data_b[b_offset + y1_idx + 35]) * q4_7);
-        const FLOAT_TYPE sz = FLOAT_TYPE(FLOAT_TYPE(data_b[b_offset + y2_idx]) * q4_8 + FLOAT_TYPE(data_b[b_offset + y2_idx + 1]) * q4_9 + FLOAT_TYPE(data_b[b_offset + y2_idx + 2]) * q4_10 + FLOAT_TYPE(data_b[b_offset + y2_idx + 3]) * q4_11);
-        const FLOAT_TYPE sw = FLOAT_TYPE(FLOAT_TYPE(data_b[b_offset + y2_idx + 32]) * q4_12 + FLOAT_TYPE(data_b[b_offset + y2_idx + 33]) * q4_13 + FLOAT_TYPE(data_b[b_offset + y2_idx + 34]) * q4_14 + FLOAT_TYPE(data_b[b_offset + y2_idx + 35]) * q4_15);
-        const FLOAT_TYPE smin = FLOAT_TYPE(
-            FLOAT_TYPE(data_b[b_offset + y1_idx    ]) * sc2 + FLOAT_TYPE(data_b[b_offset + y1_idx + 32]) * sc3 + FLOAT_TYPE(data_b[b_offset + y2_idx    ]) * sc6 + FLOAT_TYPE(data_b[b_offset + y2_idx + 32]) * sc7
-          + FLOAT_TYPE(data_b[b_offset + y1_idx + 1]) * sc2 + FLOAT_TYPE(data_b[b_offset + y1_idx + 33]) * sc3 + FLOAT_TYPE(data_b[b_offset + y2_idx + 1]) * sc6 + FLOAT_TYPE(data_b[b_offset + y2_idx + 33]) * sc7
-          + FLOAT_TYPE(data_b[b_offset + y1_idx + 2]) * sc2 + FLOAT_TYPE(data_b[b_offset + y1_idx + 34]) * sc3 + FLOAT_TYPE(data_b[b_offset + y2_idx + 2]) * sc6 + FLOAT_TYPE(data_b[b_offset + y2_idx + 34]) * sc7
-          + FLOAT_TYPE(data_b[b_offset + y1_idx + 3]) * sc2 + FLOAT_TYPE(data_b[b_offset + y1_idx + 35]) * sc3 + FLOAT_TYPE(data_b[b_offset + y2_idx + 3]) * sc6 + FLOAT_TYPE(data_b[b_offset + y2_idx + 35]) * sc7
-        );
-        tmp[16 * ix + tid] += FLOAT_TYPE(dall * (sx * sc0 + sy * sc1 + sz * sc4 + sw * sc5) - dmin * smin);
+        const FLOAT_TYPE sx = fma(FLOAT_TYPE(data_b[b_offset + y1_idx]), q4_0, fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 1]), q4_1, fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 2]), q4_2, FLOAT_TYPE(data_b[b_offset + y1_idx + 3]) * q4_3)));
+        const FLOAT_TYPE sy = fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 32]), q4_4, fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 33]), q4_5, fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 34]), q4_6, FLOAT_TYPE(data_b[b_offset + y1_idx + 35]) * q4_7)));
+        const FLOAT_TYPE sz = fma(FLOAT_TYPE(data_b[b_offset + y2_idx]), q4_8, fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 1]), q4_9, fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 2]), q4_10, FLOAT_TYPE(data_b[b_offset + y2_idx + 3]) * q4_11)));
+        const FLOAT_TYPE sw = fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 32]), q4_12, fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 33]), q4_13, fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 34]), q4_14, FLOAT_TYPE(data_b[b_offset + y2_idx + 35]) * q4_15)));
+        const FLOAT_TYPE smin =
+            fma(FLOAT_TYPE(data_b[b_offset + y1_idx    ]), sc2, fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 32]), sc3, fma(FLOAT_TYPE(data_b[b_offset + y2_idx    ]), sc6, fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 32]), sc7,
+            fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 1]), sc2, fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 33]), sc3, fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 1]), sc6, fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 33]), sc7,
+            fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 2]), sc2, fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 34]), sc3, fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 2]), sc6, fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 34]), sc7,
+            fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 3]), sc2, fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 35]), sc3, fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 3]), sc6, FLOAT_TYPE(data_b[b_offset + y2_idx + 35]) * sc7)))))))))))))));
+        const uint tmp_idx = 16 * ix + tid;
+        tmp[tmp_idx] = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, tmp[tmp_idx]));
 #else
         const uint8_t q4_0 = uint8_t(data_a[ib0 + i].qs[q_offset    ] & 0xf);
         const uint8_t q4_1 = uint8_t(data_a[ib0 + i].qs[q_offset + 1] & 0xf);
@@ -88,16 +88,19 @@ void main() {
         const uint8_t q4_6 = uint8_t(data_a[ib0 + i].qs[q_offset + 64] >> 4);
         const uint8_t q4_7 = uint8_t(data_a[ib0 + i].qs[q_offset + 65] >> 4);

-        const FLOAT_TYPE sx = FLOAT_TYPE(FLOAT_TYPE(data_b[b_offset + y1_idx     ]) * q4_0 + FLOAT_TYPE(data_b[b_offset + y1_idx + 1]) * q4_1);
-        const FLOAT_TYPE sy = FLOAT_TYPE(FLOAT_TYPE(data_b[b_offset + y1_idx + 32]) * q4_2 + FLOAT_TYPE(data_b[b_offset + y1_idx + 33]) * q4_3);
-        const FLOAT_TYPE sz = FLOAT_TYPE(FLOAT_TYPE(data_b[b_offset + y2_idx     ]) * q4_4 + FLOAT_TYPE(data_b[b_offset + y2_idx + 1]) * q4_5);
-        const FLOAT_TYPE sw = FLOAT_TYPE(FLOAT_TYPE(data_b[b_offset + y2_idx + 32]) * q4_6 + FLOAT_TYPE(data_b[b_offset + y2_idx + 33]) * q4_7);
-        const FLOAT_TYPE smin = FLOAT_TYPE(
-            FLOAT_TYPE(data_b[b_offset + y1_idx]) * sc2 + FLOAT_TYPE(data_b[b_offset + y1_idx + 32]) * sc3 + FLOAT_TYPE(data_b[b_offset + y2_idx]) * sc6 + FLOAT_TYPE(data_b[b_offset + y2_idx + 32]) * sc7
-          + FLOAT_TYPE(data_b[b_offset + y1_idx + 1]) * sc2 + FLOAT_TYPE(data_b[b_offset + y1_idx + 33]) * sc3 + FLOAT_TYPE(data_b[b_offset + y2_idx + 1]) * sc6 + FLOAT_TYPE(data_b[b_offset + y2_idx + 33]) * sc7
-        );
+        const FLOAT_TYPE sx = fma(FLOAT_TYPE(data_b[b_offset + y1_idx     ]), q4_0, FLOAT_TYPE(data_b[b_offset + y1_idx + 1]) * q4_1);
+        const FLOAT_TYPE sy = fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 32]), q4_2, FLOAT_TYPE(data_b[b_offset + y1_idx + 33]) * q4_3);
+        const FLOAT_TYPE sz = fma(FLOAT_TYPE(data_b[b_offset + y2_idx     ]), q4_4, FLOAT_TYPE(data_b[b_offset + y2_idx + 1]) * q4_5);
+        const FLOAT_TYPE sw = fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 32]), q4_6, FLOAT_TYPE(data_b[b_offset + y2_idx + 33]) * q4_7);
+        const FLOAT_TYPE smin =
+            fma(FLOAT_TYPE(data_b[b_offset + y1_idx    ]), sc2, fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 32]), sc3, fma(FLOAT_TYPE(data_b[b_offset + y2_idx    ]), sc6, fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 32]), sc7,
+          + fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 1]), sc2, fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 33]), sc3, fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 1]), sc6, FLOAT_TYPE(data_b[b_offset + y2_idx + 33]) * sc7)))))));

-        tmp[16 * ix + tid] += FLOAT_TYPE(dall * (sx * FLOAT_TYPE(data_a[ib0 + i].scales[v_im] & 0x3f) + sy * FLOAT_TYPE(data_a[ib0 + i].scales[v_im + 1] & 0x3f) +
-                              sz * FLOAT_TYPE((data_a[ib0 + i].scales[v_im + 4] & 0x0f) | ((data_a[ib0 + i].scales[v_im] & 0xc0) >> 2)) + sw * FLOAT_TYPE((data_a[ib0 + i].scales[v_im + 5] & 0x0f) | ((data_a[ib0 + i].scales[v_im + 1] & 0xc0) >> 2))) - dmin * smin);
+        const uint tmp_idx = 16 * ix + tid;
+        tmp[tmp_idx] = fma(dall, (fma(sx, FLOAT_TYPE(data_a[ib0 + i].scales[v_im] & 0x3f), fma(sy, FLOAT_TYPE(data_a[ib0 + i].scales[v_im + 1] & 0x3f),
+                       fma(sz, FLOAT_TYPE((data_a[ib0 + i].scales[v_im + 4] & 0x0f) | ((data_a[ib0 + i].scales[v_im] & 0xc0) >> 2)), fma(sw, FLOAT_TYPE((data_a[ib0 + i].scales[v_im + 5] & 0x0f) | ((data_a[ib0 + i].scales[v_im + 1] & 0xc0) >> 2))))))), fma(-dmin, smin, tmp[tmp_idx]));
 #endif
     }
@@ -66,35 +66,33 @@ void main() {
         const uint8_t q4_14 = uint8_t(data_a[ib0 + i].qs[q_offset + 80] >> 4);
         const uint8_t q4_15 = uint8_t(data_a[ib0 + i].qs[q_offset + 81] >> 4);

-        const FLOAT_TYPE sx = FLOAT_TYPE(
-            FLOAT_TYPE(data_b[b_offset + y1_idx     ]) * (q4_0 + (((data_a[ib0 + i].qh[l0     ] & hm1) != 0) ? 16 : 0))
-          + FLOAT_TYPE(data_b[b_offset + y1_idx +  1]) * (q4_1 + (((data_a[ib0 + i].qh[l0 +  1] & hm1) != 0) ? 16 : 0))
-          + FLOAT_TYPE(data_b[b_offset + y1_idx + 16]) * (q4_2 + (((data_a[ib0 + i].qh[l0 + 16] & hm1) != 0) ? 16 : 0))
-          + FLOAT_TYPE(data_b[b_offset + y1_idx + 17]) * (q4_3 + (((data_a[ib0 + i].qh[l0 + 17] & hm1) != 0) ? 16 : 0))
-        );
-        const FLOAT_TYPE sy = FLOAT_TYPE(
-            FLOAT_TYPE(data_b[b_offset + y1_idx + 32]) * (q4_4 + (((data_a[ib0 + i].qh[l0     ] & (hm1 << 1)) != 0) ? 16 : 0))
-          + FLOAT_TYPE(data_b[b_offset + y1_idx + 33]) * (q4_5 + (((data_a[ib0 + i].qh[l0 +  1] & (hm1 << 1)) != 0) ? 16 : 0))
-          + FLOAT_TYPE(data_b[b_offset + y1_idx + 48]) * (q4_6 + (((data_a[ib0 + i].qh[l0 + 16] & (hm1 << 1)) != 0) ? 16 : 0))
-          + FLOAT_TYPE(data_b[b_offset + y1_idx + 49]) * (q4_7 + (((data_a[ib0 + i].qh[l0 + 17] & (hm1 << 1)) != 0) ? 16 : 0))
-        );
-        const FLOAT_TYPE sz = FLOAT_TYPE(
-            FLOAT_TYPE(data_b[b_offset + y2_idx     ]) * (q4_8 + (((data_a[ib0 + i].qh[l0     ] & hm2) != 0) ? 16 : 0))
-          + FLOAT_TYPE(data_b[b_offset + y2_idx +  1]) * (q4_9 + (((data_a[ib0 + i].qh[l0 +  1] & hm2) != 0) ? 16 : 0))
-          + FLOAT_TYPE(data_b[b_offset + y2_idx + 16]) * (q4_10 + (((data_a[ib0 + i].qh[l0 + 16] & hm2) != 0) ? 16 : 0))
-          + FLOAT_TYPE(data_b[b_offset + y2_idx + 17]) * (q4_11 + (((data_a[ib0 + i].qh[l0 + 17] & hm2) != 0) ? 16 : 0))
-        );
-        const FLOAT_TYPE sw = FLOAT_TYPE(
-            FLOAT_TYPE(data_b[b_offset + y2_idx + 32]) * (q4_12 + (((data_a[ib0 + i].qh[l0     ] & (hm2 << 1)) != 0) ? 16 : 0))
-          + FLOAT_TYPE(data_b[b_offset + y2_idx + 33]) * (q4_13 + (((data_a[ib0 + i].qh[l0 +  1] & (hm2 << 1)) != 0) ? 16 : 0))
-          + FLOAT_TYPE(data_b[b_offset + y2_idx + 48]) * (q4_14 + (((data_a[ib0 + i].qh[l0 + 16] & (hm2 << 1)) != 0) ? 16 : 0))
-          + FLOAT_TYPE(data_b[b_offset + y2_idx + 49]) * (q4_15 + (((data_a[ib0 + i].qh[l0 + 17] & (hm2 << 1)) != 0) ? 16 : 0))
-        );
-        const FLOAT_TYPE smin = FLOAT_TYPE(
-            (FLOAT_TYPE(data_b[b_offset + y1_idx]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 1]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 16]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 17])) * sc2 + (FLOAT_TYPE(data_b[b_offset + y1_idx + 32]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 33]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 48]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 49])) * sc3
-          + (FLOAT_TYPE(data_b[b_offset + y2_idx]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 1]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 16]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 17])) * sc6 + (FLOAT_TYPE(data_b[b_offset + y2_idx + 32]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 33]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 48]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 49])) * sc7
-        );
-        tmp[16 * ix + tid] += FLOAT_TYPE(dall * (sx * sc0 + sy * sc1 + sz * sc4 + sw * sc5) - dmin * smin);
+        const FLOAT_TYPE sx =
+            fma(FLOAT_TYPE(data_b[b_offset + y1_idx     ]), (q4_0 + (((data_a[ib0 + i].qh[l0     ] & hm1) != 0) ? 16 : 0)),
+            fma(FLOAT_TYPE(data_b[b_offset + y1_idx +  1]), (q4_1 + (((data_a[ib0 + i].qh[l0 +  1] & hm1) != 0) ? 16 : 0)),
+            fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 16]), (q4_2 + (((data_a[ib0 + i].qh[l0 + 16] & hm1) != 0) ? 16 : 0)),
+                FLOAT_TYPE(data_b[b_offset + y1_idx + 17]) * (q4_3 + (((data_a[ib0 + i].qh[l0 + 17] & hm1) != 0) ? 16 : 0)))));
+        const FLOAT_TYPE sy =
+            fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 32]), (q4_4 + (((data_a[ib0 + i].qh[l0     ] & (hm1 << 1)) != 0) ? 16 : 0)),
+            fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 33]), (q4_5 + (((data_a[ib0 + i].qh[l0 +  1] & (hm1 << 1)) != 0) ? 16 : 0)),
+            fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 48]), (q4_6 + (((data_a[ib0 + i].qh[l0 + 16] & (hm1 << 1)) != 0) ? 16 : 0)),
+                FLOAT_TYPE(data_b[b_offset + y1_idx + 49]) * (q4_7 + (((data_a[ib0 + i].qh[l0 + 17] & (hm1 << 1)) != 0) ? 16 : 0)))));
+        const FLOAT_TYPE sz =
+            fma(FLOAT_TYPE(data_b[b_offset + y2_idx     ]), (q4_8 + (((data_a[ib0 + i].qh[l0     ] & hm2) != 0) ? 16 : 0)),
+            fma(FLOAT_TYPE(data_b[b_offset + y2_idx +  1]), (q4_9 + (((data_a[ib0 + i].qh[l0 +  1] & hm2) != 0) ? 16 : 0)),
+            fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 16]), (q4_10 + (((data_a[ib0 + i].qh[l0 + 16] & hm2) != 0) ? 16 : 0)),
+                FLOAT_TYPE(data_b[b_offset + y2_idx + 17]) * (q4_11 + (((data_a[ib0 + i].qh[l0 + 17] & hm2) != 0) ? 16 : 0)))));
+        const FLOAT_TYPE sw =
+            fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 32]), (q4_12 + (((data_a[ib0 + i].qh[l0     ] & (hm2 << 1)) != 0) ? 16 : 0)),
+            fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 33]), (q4_13 + (((data_a[ib0 + i].qh[l0 +  1] & (hm2 << 1)) != 0) ? 16 : 0)),
+            fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 48]), (q4_14 + (((data_a[ib0 + i].qh[l0 + 16] & (hm2 << 1)) != 0) ? 16 : 0)),
+                FLOAT_TYPE(data_b[b_offset + y2_idx + 49]) * (q4_15 + (((data_a[ib0 + i].qh[l0 + 17] & (hm2 << 1)) != 0) ? 16 : 0)))));
+        const FLOAT_TYPE smin =
+            fma(FLOAT_TYPE(data_b[b_offset + y1_idx     ]) + FLOAT_TYPE(data_b[b_offset + y1_idx +  1]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 16]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 17]), sc2,
+            fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 32]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 33]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 48]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 49]), sc3,
+            fma(FLOAT_TYPE(data_b[b_offset + y2_idx     ]) + FLOAT_TYPE(data_b[b_offset + y2_idx +  1]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 16]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 17]), sc6,
+               (FLOAT_TYPE(data_b[b_offset + y2_idx + 32]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 33]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 48]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 49])) * sc7)));
+        const uint tmp_idx = 16 * ix + tid;
+        tmp[tmp_idx] = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, tmp[tmp_idx]));
     }

     // sum up partial sums and write back result
@ -44,22 +44,22 @@ void main() {
|
|||
const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d);
|
||||
|
||||
#if K_QUANTS_PER_ITERATION == 1
|
||||
FLOAT_TYPE sum = FLOAT_TYPE(data_b[b_offset + y_idx + 0]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 0]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 0] & 0xF) | ((data_a[ib0 + i].qh[qh_offset + 0] & 0x03) << 4)) - 32)
|
||||
+ FLOAT_TYPE(data_b[b_offset + y_idx + 16]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 1]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 16] & 0xF) | ((data_a[ib0 + i].qh[qh_offset + 16] & 0x03) << 4)) - 32)
|
||||
+ FLOAT_TYPE(data_b[b_offset + y_idx + 32]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 2]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 32] & 0xF) | ((data_a[ib0 + i].qh[qh_offset + 0] & 0x0c) << 2)) - 32)
|
||||
+ FLOAT_TYPE(data_b[b_offset + y_idx + 48]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 3]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 48] & 0xF) | ((data_a[ib0 + i].qh[qh_offset + 16] & 0x0c) << 2)) - 32)
|
||||
+ FLOAT_TYPE(data_b[b_offset + y_idx + 64]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 4]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 0] >> 4) | ((data_a[ib0 + i].qh[qh_offset + 0] & 0x30) >> 0)) - 32)
|
||||
            + FLOAT_TYPE(data_b[b_offset + y_idx + 80]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 5]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 16] >> 4) | ((data_a[ib0 + i].qh[qh_offset + 16] & 0x30) >> 0)) - 32)
            + FLOAT_TYPE(data_b[b_offset + y_idx + 96]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 6]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 32] >> 4) | ((data_a[ib0 + i].qh[qh_offset + 0] & 0xc0) >> 2)) - 32)
            + FLOAT_TYPE(data_b[b_offset + y_idx +112]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 7]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 48] >> 4) | ((data_a[ib0 + i].qh[qh_offset + 16] & 0xc0) >> 2)) - 32);
        tmp[16 * ix + tid] += sum;
        const uint tmp_idx = 16 * ix + tid;
        tmp[tmp_idx] = fma(FLOAT_TYPE(data_b[b_offset + y_idx + 0]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 0]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 0] & 0xF) | ((data_a[ib0 + i].qh[qh_offset + 0] & 0x03) << 4)) - 32),
                       fma(FLOAT_TYPE(data_b[b_offset + y_idx + 16]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 1]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 16] & 0xF) | ((data_a[ib0 + i].qh[qh_offset + 16] & 0x03) << 4)) - 32),
                       fma(FLOAT_TYPE(data_b[b_offset + y_idx + 32]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 2]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 32] & 0xF) | ((data_a[ib0 + i].qh[qh_offset + 0] & 0x0c) << 2)) - 32),
                       fma(FLOAT_TYPE(data_b[b_offset + y_idx + 48]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 3]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 48] & 0xF) | ((data_a[ib0 + i].qh[qh_offset + 16] & 0x0c) << 2)) - 32),
                       fma(FLOAT_TYPE(data_b[b_offset + y_idx + 64]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 4]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 0] >> 4) | ((data_a[ib0 + i].qh[qh_offset + 0] & 0x30) >> 0)) - 32),
                       fma(FLOAT_TYPE(data_b[b_offset + y_idx + 80]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 5]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 16] >> 4) | ((data_a[ib0 + i].qh[qh_offset + 16] & 0x30) >> 0)) - 32),
                       fma(FLOAT_TYPE(data_b[b_offset + y_idx + 96]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 6]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 32] >> 4) | ((data_a[ib0 + i].qh[qh_offset + 0] & 0xc0) >> 2)) - 32),
                       fma(FLOAT_TYPE(data_b[b_offset + y_idx +112]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 7]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 48] >> 4) | ((data_a[ib0 + i].qh[qh_offset + 16] & 0xc0) >> 2)) - 32), tmp[tmp_idx]))))))));
#else
        FLOAT_TYPE sum = FLOAT_TYPE(0.0);
        [[unroll]] for (int l = 0; l < 4; ++l) {
            sum += FLOAT_TYPE(data_b[b_offset + y_idx + l+ 0]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 0]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + l+ 0] & 0xF) | (((data_a[ib0 + i].qh[qh_offset + l] >> 0) & 3) << 4)) - 32)
                 + FLOAT_TYPE(data_b[b_offset + y_idx + l+32]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 2]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + l+32] & 0xF) | (((data_a[ib0 + i].qh[qh_offset + l] >> 2) & 3) << 4)) - 32)
                 + FLOAT_TYPE(data_b[b_offset + y_idx + l+64]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 4]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + l+ 0] >> 4) | (((data_a[ib0 + i].qh[qh_offset + l] >> 4) & 3) << 4)) - 32)
                 + FLOAT_TYPE(data_b[b_offset + y_idx + l+96]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 6]) * d * FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + l+32] >> 4) | (((data_a[ib0 + i].qh[qh_offset + l] >> 6) & 3) << 4)) - 32);
            sum = fma(FLOAT_TYPE(data_b[b_offset + y_idx + l+ 0]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 0]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + l+ 0] & 0xF) | (((data_a[ib0 + i].qh[qh_offset + l] >> 0) & 3) << 4)) - 32),
                  fma(FLOAT_TYPE(data_b[b_offset + y_idx + l+32]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 2]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + l+32] & 0xF) | (((data_a[ib0 + i].qh[qh_offset + l] >> 2) & 3) << 4)) - 32),
                  fma(FLOAT_TYPE(data_b[b_offset + y_idx + l+64]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 4]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + l+ 0] >> 4) | (((data_a[ib0 + i].qh[qh_offset + l] >> 4) & 3) << 4)) - 32),
                  fma(FLOAT_TYPE(data_b[b_offset + y_idx + l+96]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 6]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + l+32] >> 4) | (((data_a[ib0 + i].qh[qh_offset + l] >> 6) & 3) << 4)) - 32), sum))));
        }
        tmp[16 * ix + tid] += sum;
#endif
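For reference, the 6-bit reconstruction performed inline above can be written as a scalar sketch: each Q6_K weight keeps its low 4 bits in ql and its high 2 bits in qh, and the unpacked value is recentred by subtracting 32 before scaling. A minimal host-side C++ illustration, not part of the diff (names hypothetical):

    #include <cstdint>

    // Hypothetical scalar reference for one Q6_K weight: low 4 bits from ql,
    // high 2 bits from qh, recentred around zero, then scaled.
    static inline float dequant_q6_k_weight(uint8_t ql_nibble, uint8_t qh_bits, float d, int8_t scale) {
        const int q = (ql_nibble & 0xF) | ((qh_bits & 0x3) << 4); // 6-bit value, 0..63
        return d * float(scale) * float(q - 32);                  // recentre, then scale
    }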
@ -326,10 +326,10 @@ void main() {
        mbyte = uint8_t((data_a[ib].scales[is + 4] >> 4) | ((data_a[ib].scales[is ] >> 6) << 4));
    }
    const float d = loadd.x * sc;
    const float m = loadd.y * mbyte;
    const float m = -loadd.y * mbyte;

    buf_a[buf_idx ] = FLOAT_TYPE(d * float((data_a[ib].qs[qsi ] >> (b * 4)) & 0xF) - m);
    buf_a[buf_idx + 1] = FLOAT_TYPE(d * float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF) - m);
    buf_a[buf_idx ] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi ] >> (b * 4)) & 0xF), m));
    buf_a[buf_idx + 1] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF), m));
#elif defined(DATA_A_Q5_K)
    const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
    const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;
@ -357,10 +357,10 @@ void main() {
        mbyte = uint8_t((data_a[ib].scales[is + 4] >> 4) | ((data_a[ib].scales[is ] >> 6) << 4));
    }
    const float d = loadd.x * sc;
    const float m = loadd.y * mbyte;
    const float m = -loadd.y * mbyte;

    buf_a[buf_idx ] = FLOAT_TYPE(d * (float((data_a[ib].qs[qsi ] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi ] & hm) != 0 ? 16 : 0)) - m);
    buf_a[buf_idx + 1] = FLOAT_TYPE(d * (float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi + 1] & hm) != 0 ? 16 : 0)) - m);
    buf_a[buf_idx ] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi ] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi ] & hm) != 0 ? 16 : 0), m));
    buf_a[buf_idx + 1] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi + 1] & hm) != 0 ? 16 : 0), m));
#elif defined(DATA_A_Q6_K)
    const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
    const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;
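Both hunks above apply the same transformation: d * q - m computed with a separate multiply and subtract rounds twice, while negating m once up front (m = -loadd.y * mbyte) lets a single fused multiply-add produce the identical result, since fma(d, q, -m) == d*q - m. A hedged C++ sketch of the identity, not from the diff:

    #include <cmath>

    // Fold the subtraction into the fused multiply-add by negating m once.
    float dequant(float d, float m, int q) {
        const float neg_m = -m;              // mirrors `const float m = -loadd.y * mbyte;`
        return std::fma(d, float(q), neg_m); // == d*q - m, with a single rounding step
    }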
@ -463,7 +463,8 @@ void main() {
        [[unroll]] for (uint wsir = 0; wsir < WMITER; wsir++) {
            [[unroll]] for (uint cc = 0; cc < TN; cc++) {
                [[unroll]] for (uint cr = 0; cr < TM; cr++) {
                    sums[(wsic * TN + cc) * (WMITER * TM) + wsir * TM + cr] += float(cache_a[wsir * TM + cr]) * float(cache_b[wsic * TN + cc]);
                    const uint sums_idx = (wsic * TN + cc) * (WMITER * TM) + wsir * TM + cr;
                    sums[sums_idx] = fma(float(cache_a[wsir * TM + cr]), float(cache_b[wsic * TN + cc]), sums[sums_idx]);
                }
            }
        }
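The accumulator change in this hunk follows the same pattern: sums[i] += a * b becomes sums[i] = fma(a, b, sums[i]), dropping the intermediate rounding and mapping to one hardware FMA per iteration on most GPUs. A small C++ analogue, illustrative only:

    #include <cmath>

    // Illustrative inner-product accumulation using fused multiply-add.
    void accumulate(float * sums, const float * a, const float * b, int n) {
        for (int i = 0; i < n; ++i) {
            sums[i] = std::fma(a[i], b[i], sums[i]); // was: sums[i] += a[i] * b[i];
        }
    }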
ggml/src/vulkan-shaders/repeat.comp (new file, 24 lines)
@ -0,0 +1,24 @@
#version 450

#include "types.comp"
#include "generic_unary_head.comp"

uint src0_idx_mod(uint idx) {
    const uint i13 = idx / (p.ne12*p.ne11*p.ne10);
    const uint i13_offset = i13 * p.ne12*p.ne11*p.ne10;
    const uint i12 = (idx - i13_offset) / (p.ne11*p.ne10);
    const uint i12_offset = i12*p.ne11*p.ne10;
    const uint i11 = (idx - i13_offset - i12_offset) / p.ne10;
    const uint i10 = idx - i13_offset - i12_offset - i11*p.ne10;
    return (i13 % p.ne03)*p.nb03 + (i12 % p.ne02)*p.nb02 + (i11 % p.ne01)*p.nb01 + (i10 % p.ne00)*p.nb00;
}

void main() {
    const uint idx = get_idx();

    if (idx >= p.ne) {
        return;
    }

    data_d[p.d_offset + dst_idx(idx)] = D_TYPE(data_a[src0_idx_mod(idx)]);
}
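src0_idx_mod above is what makes this shader a repeat rather than a plain copy: the flat destination index is decomposed into four coordinates against the destination extents (p.ne10..p.ne12), and each coordinate is wrapped with % against the source extents (p.ne00..p.ne03) before the source offset is formed. A host-side C++ sketch of the same index math, with parameter names assumed to mirror the push-constant fields:

    #include <cstdint>

    // ne1x: destination extents, ne0x: source extents, nb0x: source strides.
    uint32_t src0_idx_mod(uint32_t idx,
                          uint32_t ne10, uint32_t ne11, uint32_t ne12,
                          uint32_t ne00, uint32_t ne01, uint32_t ne02, uint32_t ne03,
                          uint32_t nb00, uint32_t nb01, uint32_t nb02, uint32_t nb03) {
        const uint32_t i13 = idx / (ne12 * ne11 * ne10);
        const uint32_t i12 = (idx / (ne11 * ne10)) % ne12;
        const uint32_t i11 = (idx / ne10) % ne11;
        const uint32_t i10 = idx % ne10;
        // wrapping each coordinate into the source tensor produces the tiling
        return (i13 % ne03) * nb03 + (i12 % ne02) * nb02
             + (i11 % ne01) * nb01 + (i10 % ne00) * nb00;
    }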
@ -381,6 +381,10 @@ void process_shaders(std::vector<std::future<void>>& tasks) {
        string_to_spv("div_f32", "div.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
    }));

    tasks.push_back(std::async(std::launch::async, [] {
        string_to_spv("repeat_f32", "repeat.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
    }));

    tasks.push_back(std::async(std::launch::async, [] {
        string_to_spv("scale_f32", "scale.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
    }));
@ -219,6 +219,8 @@ class MODEL_ARCH(IntEnum):
    T5 = auto()
    T5ENCODER = auto()
    JAIS = auto()
    NEMOTRON = auto()
    EXAONE = auto()


class MODEL_TENSOR(IntEnum):
@ -347,6 +349,8 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
    MODEL_ARCH.T5: "t5",
    MODEL_ARCH.T5ENCODER: "t5encoder",
    MODEL_ARCH.JAIS: "jais",
    MODEL_ARCH.NEMOTRON: "nemotron",
    MODEL_ARCH.EXAONE: "exaone",
}

TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
@ -1065,6 +1069,37 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
        MODEL_TENSOR.FFN_GATE,
        MODEL_TENSOR.FFN_UP,
    ],
    MODEL_ARCH.NEMOTRON: [
        MODEL_TENSOR.TOKEN_EMBD,
        MODEL_TENSOR.OUTPUT_NORM,
        MODEL_TENSOR.OUTPUT,
        MODEL_TENSOR.ROPE_FREQS,
        MODEL_TENSOR.ATTN_NORM,
        MODEL_TENSOR.ATTN_Q,
        MODEL_TENSOR.ATTN_K,
        MODEL_TENSOR.ATTN_V,
        MODEL_TENSOR.ATTN_OUT,
        MODEL_TENSOR.ATTN_ROT_EMBD,
        MODEL_TENSOR.FFN_NORM,
        MODEL_TENSOR.FFN_DOWN,
        MODEL_TENSOR.FFN_UP,
    ],
    MODEL_ARCH.EXAONE: [
        MODEL_TENSOR.TOKEN_EMBD,
        MODEL_TENSOR.OUTPUT_NORM,
        MODEL_TENSOR.OUTPUT,
        MODEL_TENSOR.ROPE_FREQS,
        MODEL_TENSOR.ATTN_NORM,
        MODEL_TENSOR.ATTN_Q,
        MODEL_TENSOR.ATTN_K,
        MODEL_TENSOR.ATTN_V,
        MODEL_TENSOR.ATTN_OUT,
        MODEL_TENSOR.ATTN_ROT_EMBD,
        MODEL_TENSOR.FFN_NORM,
        MODEL_TENSOR.FFN_GATE,
        MODEL_TENSOR.FFN_DOWN,
        MODEL_TENSOR.FFN_UP,
    ],
    # TODO
}

@ -1105,6 +1140,10 @@ MODEL_TENSOR_SKIP: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
    MODEL_ARCH.CHATGLM: [
        MODEL_TENSOR.ROPE_FREQS,
    ],
    MODEL_ARCH.NEMOTRON: [
        MODEL_TENSOR.ROPE_FREQS,
        MODEL_TENSOR.ATTN_ROT_EMBD,
    ],
}

#
@ -10,10 +10,10 @@ class TensorNameMap:
    # Token embeddings
    MODEL_TENSOR.TOKEN_EMBD: (
        "gpt_neox.embed_in", # gptneox
        "transformer.wte", # gpt2 gpt-j mpt refact qwen dbrx jais
        "transformer.wte", # gpt2 gpt-j mpt refact qwen dbrx jais exaone
        "transformer.word_embeddings", # falcon
        "word_embeddings", # bloom
        "model.embed_tokens", # llama-hf
        "model.embed_tokens", # llama-hf nemotron
        "tok_embeddings", # llama-pth
        "embeddings.word_embeddings", # bert nomic-bert
        "language_model.embedding.word_embeddings", # persimmon
@ -52,7 +52,7 @@ class TensorNameMap:
    # Output
    MODEL_TENSOR.OUTPUT: (
        "embed_out", # gptneox
        "lm_head", # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais
        "lm_head", # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais nemotron exaone
        "output", # llama-pth bloom internlm2
        "word_embeddings_for_head", # persimmon
        "lm_head.linear", # phi2
@ -62,7 +62,7 @@ class TensorNameMap:
    # Output norm
    MODEL_TENSOR.OUTPUT_NORM: (
        "gpt_neox.final_layer_norm", # gptneox
        "transformer.ln_f", # gpt2 gpt-j falcon jais
        "transformer.ln_f", # gpt2 gpt-j falcon jais exaone
        "model.norm", # llama-hf baichuan internlm2
        "norm", # llama-pth
        "transformer.norm_f", # mpt dbrx
@ -75,6 +75,7 @@ class TensorNameMap:
        "transformer.rms_norm", # Grok
        "encoder.final_layernorm", # chatglm
        "transformer.norm", # openelm
        "model.norm", # nemotron
    ),

    # Rope frequencies
@ -88,12 +89,12 @@ class TensorNameMap:
    # Attention norm
    MODEL_TENSOR.ATTN_NORM: (
        "gpt_neox.layers.{bid}.input_layernorm", # gptneox
        "transformer.h.{bid}.ln_1", # gpt2 gpt-j refact qwen jais
        "transformer.h.{bid}.ln_1", # gpt2 gpt-j refact qwen jais exaone
        "transformer.blocks.{bid}.norm_1", # mpt
        "transformer.h.{bid}.input_layernorm", # falcon7b
        "h.{bid}.input_layernorm", # bloom
        "transformer.h.{bid}.ln_mlp", # falcon40b
        "model.layers.{bid}.input_layernorm", # llama-hf
        "model.layers.{bid}.input_layernorm", # llama-hf nemotron
        "layers.{bid}.attention_norm", # llama-pth
        "language_model.encoder.layers.{bid}.input_layernorm", # persimmon
        "model.layers.{bid}.ln1", # yi
@ -135,18 +136,19 @@ class TensorNameMap:

    # Attention query
    MODEL_TENSOR.ATTN_Q: (
        "model.layers.{bid}.self_attn.q_proj", # llama-hf
        "model.layers.{bid}.self_attn.q_proj", # llama-hf nemotron
        "layers.{bid}.attention.wq", # llama-pth
        "encoder.layer.{bid}.attention.self.query", # bert
        "transformer.h.{bid}.attn.q_proj", # gpt-j
        "model.layers.layers.{bid}.self_attn.q_proj", # plamo
        "model.layers.{bid}.attention.wq", # internlm2
        "transformer.decoder_layer.{bid}.multi_head_attention.query",# Grok
        "transformer.h.{bid}.attn.attention.q_proj", # exaone
    ),

    # Attention key
    MODEL_TENSOR.ATTN_K: (
        "model.layers.{bid}.self_attn.k_proj", # llama-hf
        "model.layers.{bid}.self_attn.k_proj", # llama-hf nemotron
        "layers.{bid}.attention.wk", # llama-pth
        "encoder.layer.{bid}.attention.self.key", # bert
        "transformer.h.{bid}.attn.k_proj", # gpt-j
@ -154,18 +156,20 @@ class TensorNameMap:
        "model.layers.layers.{bid}.self_attn.k_proj", # plamo
        "model.layers.{bid}.attention.wk", # internlm2
        "transformer.decoder_layer.{bid}.multi_head_attention.key",# Grok
        "transformer.h.{bid}.attn.attention.k_proj", # exaone
    ),

    # Attention value
    MODEL_TENSOR.ATTN_V: (
        "model.layers.{bid}.self_attn.v_proj", # llama-hf
        "model.layers.{bid}.self_attn.v_proj", # llama-hf nemotron
        "layers.{bid}.attention.wv", # llama-pth
        "encoder.layer.{bid}.attention.self.value", # bert
        "transformer.h.{bid}.attn.v_proj", # gpt-j
        "transformer.h.{bid}.attn.v", # refact
        "model.layers.layers.{bid}.self_attn.v_proj", # plamo
        "model.layers.{bid}.attention.wv", # internlm2
        "transformer.decoder_layer.{bid}.multi_head_attention.value" # Grok
        "transformer.decoder_layer.{bid}.multi_head_attention.value",# Grok
        "transformer.h.{bid}.attn.attention.v_proj", # exaone
    ),

    # Attention output
@ -175,7 +179,7 @@ class TensorNameMap:
        "transformer.blocks.{bid}.attn.out_proj", # mpt
        "transformer.h.{bid}.self_attention.dense", # falcon
        "h.{bid}.self_attention.dense", # bloom
        "model.layers.{bid}.self_attn.o_proj", # llama-hf
        "model.layers.{bid}.self_attn.o_proj", # llama-hf nemotron
        "layers.{bid}.attention.wo", # llama-pth
        "encoder.layer.{bid}.attention.output.dense", # bert
        "transformer.h.{bid}.attn.out_proj", # gpt-j
@ -190,6 +194,7 @@ class TensorNameMap:
        "transformer.blocks.{bid}.norm_attn_norm.attn.out_proj", # dbrx
        "encoder.layers.{bid}.self_attention.dense", # chatglm
        "transformer.layers.{bid}.attn.out_proj", # openelm
        "transformer.h.{bid}.attn.attention.out_proj", # exaone
    ),

    # Attention output norm
@ -215,10 +220,10 @@ class TensorNameMap:
    # Feed-forward norm
    MODEL_TENSOR.FFN_NORM: (
        "gpt_neox.layers.{bid}.post_attention_layernorm", # gptneox
        "transformer.h.{bid}.ln_2", # gpt2 refact qwen jais
        "transformer.h.{bid}.ln_2", # gpt2 refact qwen jais exaone
        "h.{bid}.post_attention_layernorm", # bloom
        "transformer.blocks.{bid}.norm_2", # mpt
        "model.layers.{bid}.post_attention_layernorm", # llama-hf
        "model.layers.{bid}.post_attention_layernorm", # llama-hf nemotron
        "layers.{bid}.ffn_norm", # llama-pth
        "language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon
        "model.layers.{bid}.ln2", # yi
@ -258,7 +263,7 @@ class TensorNameMap:
        "transformer.blocks.{bid}.ffn.up_proj", # mpt
        "transformer.h.{bid}.mlp.dense_h_to_4h", # falcon
        "h.{bid}.mlp.dense_h_to_4h", # bloom
        "model.layers.{bid}.mlp.up_proj", # llama-hf refact
        "model.layers.{bid}.mlp.up_proj", # llama-hf refact nemotron
        "layers.{bid}.feed_forward.w3", # llama-pth
        "encoder.layer.{bid}.intermediate.dense", # bert
        "transformer.h.{bid}.mlp.fc_in", # gpt-j
@ -277,6 +282,7 @@ class TensorNameMap:
        "encoder.layer.{bid}.mlp.gated_layers_v", # jina-bert-v2
        "model.layers.{bid}.residual_mlp.w3", # arctic
        "encoder.layers.{bid}.mlp.dense_h_to_4h", # chatglm
        "transformer.h.{bid}.mlp.c_fc_1", # exaone
    ),

    MODEL_TENSOR.FFN_UP_EXP: (
@ -308,6 +314,7 @@ class TensorNameMap:
        "encoder.layer.{bid}.mlp.gated_layers_w", # jina-bert-v2
        "transformer.h.{bid}.mlp.linear_1", # refact
        "model.layers.{bid}.residual_mlp.w1", # arctic
        "transformer.h.{bid}.mlp.c_fc_0", # exaone
    ),

    MODEL_TENSOR.FFN_GATE_EXP: (
@ -329,7 +336,7 @@ class TensorNameMap:
        "transformer.blocks.{bid}.ffn.down_proj", # mpt
        "transformer.h.{bid}.mlp.dense_4h_to_h", # falcon
        "h.{bid}.mlp.dense_4h_to_h", # bloom
        "model.layers.{bid}.mlp.down_proj", # llama-hf
        "model.layers.{bid}.mlp.down_proj", # llama-hf nemotron
        "layers.{bid}.feed_forward.w2", # llama-pth
        "encoder.layer.{bid}.output.dense", # bert
        "transformer.h.{bid}.mlp.fc_out", # gpt-j
@ -347,6 +354,7 @@ class TensorNameMap:
        "model.layers.{bid}.residual_mlp.w2", # arctic
        "encoder.layer.{bid}.mlp.down_layer", # jina-bert-v2
        "encoder.layers.{bid}.mlp.dense_4h_to_h", # chatglm
        "model.layers.h.{bid}.mlp.c_proj", # exaone
    ),

    MODEL_TENSOR.FFN_DOWN_EXP: (
@ -1,6 +1,6 @@
[tool.poetry]
name = "gguf"
version = "0.9.1"
version = "0.10.0"
description = "Read and write ML models in GGUF for GGML"
authors = ["GGML <ggml@ggml.ai>"]
packages = [
@ -93,6 +93,9 @@ extern "C" {
        LLAMA_VOCAB_PRE_TYPE_TEKKEN = 20,
        LLAMA_VOCAB_PRE_TYPE_SMOLLM = 21,
        LLAMA_VOCAB_PRE_TYPE_CODESHELL = 22,
        LLAMA_VOCAB_PRE_TYPE_BLOOM = 23,
        LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH = 24,
        LLAMA_VOCAB_PRE_TYPE_EXAONE = 25,
    };

    enum llama_rope_type {
@ -914,11 +917,8 @@ extern "C" {
    LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line
    LLAMA_API llama_token llama_token_pad(const struct llama_model * model); // padding

    // Returns -1 if unknown, 1 for true or 0 for false.
    LLAMA_API int32_t llama_add_bos_token(const struct llama_model * model);

    // Returns -1 if unknown, 1 for true or 0 for false.
    LLAMA_API int32_t llama_add_eos_token(const struct llama_model * model);
    LLAMA_API bool llama_add_bos_token(const struct llama_model * model);
    LLAMA_API bool llama_add_eos_token(const struct llama_model * model);

    // Codellama infill tokens
    LLAMA_API llama_token llama_token_prefix(const struct llama_model * model); // Beginning of infill prefix
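The tri-state API is gone: llama_add_bos_token and llama_add_eos_token now return a plain bool, with the previous -1 ("unknown") case resolved inside the library instead of by every caller. A minimal sketch of an updated call site, assuming only the new signatures above:

    // old: int32_t r = llama_add_bos_token(model);
    //      bool add_bos = r != -1 ? bool(r) : some_vocab_based_fallback;
    // new: the library returns the resolved default directly.
    const bool add_bos = llama_add_bos_token(model);
    const bool add_eos = llama_add_eos_token(model);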
klite.embd (17 changed lines)
@ -12,7 +12,7 @@ Current version indicated by LITEVER below.
-->

<script>
const LITEVER = 163;
const LITEVER = 164;
const urlParams = new URLSearchParams(window.location.search);
const localflag = true;
const STORAGE_PREFIX = (localflag?"e_":"")+"kaihordewebui_";
@ -5945,7 +5945,7 @@ Current version indicated by LITEVER below.
try {
    let new_loaded_storyobj = JSON.parse(text);
    //we don't want to fiddle with the file as its very complex. only handle the parts we are interested in, and just leave the rest untouched.
    if (new_loaded_storyobj.prompt != null && !new_loaded_storyobj.scenarioVersion) {
    if (is_kai_json(new_loaded_storyobj) && !new_loaded_storyobj.scenarioVersion) {
        //quick sanity check. if prompt does not exist, this is not a KAI save.
        kai_json_load(new_loaded_storyobj,false);
        if (selectedFilename && selectedFilename != "") {
@ -6043,6 +6043,12 @@ Current version indicated by LITEVER below.
    return (gametext_arr.length == 0 && current_memory == "" && current_anote == "" && current_wi.length == 0 && redo_arr.length == 0);
}

function is_kai_json(obj)
{
    let is_kai = (!(obj.prompt==null) || obj.savedsettings!=null);
    return is_kai;
}

function kai_json_load(storyobj, force_load_settngs)
{
    //either show popup or just proceed to load
@ -6151,7 +6157,7 @@ Current version indicated by LITEVER below.
    }
} else {
    //v2 load
    if(storyobj.prompt!="")
    if(storyobj.prompt != "")
    {
        gametext_arr.push(storyobj.prompt);
    }
@ -6517,7 +6523,7 @@ Current version indicated by LITEVER below.
{
    console.log("Loading nai obj");
    restart_new_game(false);
    if(obj.prompt!="")
    if(obj.prompt != "")
    {
        gametext_arr.push(obj.prompt);
    }
@ -8521,8 +8527,7 @@ Current version indicated by LITEVER below.
.then(response => response.json())
.then(values5 => {
    let tmpstory = values5;
    let is_kai = !(tmpstory.prompt==null);
    if(is_kai)
    if(is_kai_json(tmpstory))
    {
        if (localsettings.persist_session && !safe_to_overwrite()) {
            console.log("Preload story: Unsafe to overwrite");
@ -616,6 +616,7 @@ struct llm_tokenizer_bpe {
            case LLAMA_VOCAB_PRE_TYPE_COMMAND_R:
            case LLAMA_VOCAB_PRE_TYPE_SMOLLM:
            case LLAMA_VOCAB_PRE_TYPE_CODESHELL:
            case LLAMA_VOCAB_PRE_TYPE_EXAONE:
                regex_exprs = {
                    "\\p{N}",
                    "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
@ -638,6 +639,8 @@ struct llm_tokenizer_bpe {
                };
                break;
            case LLAMA_VOCAB_PRE_TYPE_PORO:
            case LLAMA_VOCAB_PRE_TYPE_BLOOM:
            case LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH:
                regex_exprs = {
                    " ?[^(\\s|.,!?…。,、।۔،)]+",
                };
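EXAONE is routed to the existing GPT-2-style expression set above, whose first pattern, \p{N}, isolates every digit into its own pre-token before the BPE merges run. A rough C++ illustration of that first split, with [0-9] standing in for the Unicode class \p{N}, which std::regex does not support:

    #include <iostream>
    #include <regex>
    #include <string>

    // ASCII approximation of the digit-isolating pre-split: each digit
    // becomes its own pre-token before BPE merges are applied.
    int main() {
        const std::string text = "year 2024";
        const std::regex digit("[0-9]");
        for (auto it = std::sregex_iterator(text.begin(), text.end(), digit);
             it != std::sregex_iterator(); ++it) {
            std::cout << it->str() << "\n"; // prints 2, 0, 2, 4
        }
    }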
@ -1723,11 +1726,11 @@ llama_token llama_token_pad_impl(const struct llama_vocab & vocab) {
    return vocab.special_pad_id;
}

int32_t llama_add_bos_token_impl(const struct llama_vocab & vocab) {
bool llama_add_bos_token_impl(const struct llama_vocab & vocab) {
    return vocab.tokenizer_add_bos;
}

int32_t llama_add_eos_token_impl(const struct llama_vocab & vocab) {
bool llama_add_eos_token_impl(const struct llama_vocab & vocab) {
    return vocab.tokenizer_add_eos;
}

@ -95,8 +95,8 @@ llama_token llama_token_sep_impl(const struct llama_vocab & vocab);
llama_token llama_token_nl_impl (const struct llama_vocab & vocab);
llama_token llama_token_pad_impl(const struct llama_vocab & vocab);

int32_t llama_add_bos_token_impl(const struct llama_vocab & vocab);
int32_t llama_add_eos_token_impl(const struct llama_vocab & vocab);
bool llama_add_bos_token_impl(const struct llama_vocab & vocab);
bool llama_add_eos_token_impl(const struct llama_vocab & vocab);

llama_token llama_token_prefix_impl(const struct llama_vocab & vocab);
llama_token llama_token_middle_impl(const struct llama_vocab & vocab);
src/llama.cpp (419 changed lines)
@ -219,6 +219,8 @@ enum llm_arch {
    LLM_ARCH_T5,
    LLM_ARCH_T5ENCODER,
    LLM_ARCH_JAIS,
    LLM_ARCH_NEMOTRON,
    LLM_ARCH_EXAONE,
    LLM_ARCH_UNKNOWN,
};

@ -264,6 +266,8 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
    { LLM_ARCH_T5, "t5" },
    { LLM_ARCH_T5ENCODER, "t5encoder" },
    { LLM_ARCH_JAIS, "jais" },
    { LLM_ARCH_NEMOTRON, "nemotron" },
    { LLM_ARCH_EXAONE, "exaone" },
    { LLM_ARCH_UNKNOWN, "(unknown)" },
};

@ -1305,6 +1309,43 @@ static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NA
        { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
    },
},
{
    LLM_ARCH_NEMOTRON,
    {
        { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
        { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
        { LLM_TENSOR_OUTPUT, "output" },
        { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
        { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
        { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
        { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
        { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
        { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
        { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
        { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
        { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
        { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
    },
},
{
    LLM_ARCH_EXAONE,
    {
        { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
        { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
        { LLM_TENSOR_OUTPUT, "output" },
        { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
        { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
        { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
        { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
        { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
        { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
        { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
        { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
        { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
        { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
        { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
    },
},
{
    LLM_ARCH_UNKNOWN,
    {
@ -5268,6 +5309,23 @@ static void llm_load_hparams(
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_NEMOTRON:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
                switch (hparams.n_layer) {
                    case 32: model.type = e_model::MODEL_4B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_EXAONE:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);

                switch (hparams.n_layer) {
                    case 32: model.type = e_model::MODEL_8B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        default: (void)0;
    }

@ -5509,6 +5567,15 @@ static void llm_load_vocab(
            } else if (
                tokenizer_pre == "codeshell") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CODESHELL;
            } else if (
                tokenizer_pre == "bloom") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_BLOOM;
            } else if (
                tokenizer_pre == "gpt3-finnish") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH;
            } else if (
                tokenizer_pre == "exaone") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_EXAONE;
            } else {
                throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
            }
@ -7622,6 +7689,78 @@ static bool llm_load_tensors(
                    layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
                }
            } break;
        case LLM_ARCH_NEMOTRON:
            {
                model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                // output
                {
                    model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                    model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
                    model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                }

                for (int i = 0; i < n_layer; ++i) {
                    ggml_context * ctx_layer = ctx_for_layer(i);
                    ggml_context * ctx_split = ctx_for_layer_split(i);

                    auto & layer = model.layers[i];

                    layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                    layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});

                    layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                    layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                    layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                    layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                    // optional bias tensors
                    layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
                    layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
                    layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
                    layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);

                    layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                    layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});

                    layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                    layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});

                    // optional MLP bias
                    layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
                    layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
                }
            } break;
        case LLM_ARCH_EXAONE:
            {
                model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                // output
                {
                    model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                    model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                }

                for (int i = 0; i < n_layer; ++i) {
                    ggml_context * ctx_layer = ctx_for_layer(i);
                    ggml_context * ctx_split = ctx_for_layer_split(i);

                    auto & layer = model.layers[i];

                    layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                    layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head});
                    layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
                    layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
                    layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd});

                    layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                    layer.rope_freqs = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ROPE_FREQS, "weight"), {n_embd/n_head/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
                    layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                    layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                    layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                }
            } break;
        default:
            throw std::runtime_error("unknown architecture");
    }
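Both new loaders mark the bias tensors with llama_model_loader::TENSOR_NOT_REQUIRED; under that flag create_tensor yields a null tensor when the checkpoint omits the weight, and the graph builders below guard each use accordingly (if (model.layers[il].bq) { ... }). A self-contained C++ analogue of the pattern, purely illustrative:

    #include <cstdio>

    struct Tensor { float v; };

    // stand-in for create_tensor(..., TENSOR_NOT_REQUIRED): null when absent
    Tensor * load_optional(bool present) {
        static Tensor bias{0.5f};
        return present ? &bias : nullptr;
    }

    int main() {
        float x = 2.0f;
        if (Tensor * b = load_optional(false)) { // mirrors `if (model.layers[il].bq)`
            x += b->v;
        }
        std::printf("%.1f\n", x); // 2.0: the optional bias was simply skipped
    }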
@ -8308,7 +8447,7 @@ static struct ggml_tensor * llm_build_kqv(
    struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
    cb(kq, "kq", il);

    if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_QWEN2) {
    if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_QWEN2 || model.arch == LLM_ARCH_NEMOTRON) {
        // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
        // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
        ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
@ -13827,6 +13966,254 @@ struct llm_build_context {

        return gf;
    }

    struct ggml_cgraph * build_nemotron() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        //GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = build_inp_pos();

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm,
                    model.layers[il].attn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                if (model.layers[il].bq) {
                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                    cb(Qcur, "Qcur", il);
                }

                struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                if (model.layers[il].bk) {
                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                    cb(Kcur, "Kcur", il);
                }

                struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                if (model.layers[il].bv) {
                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                    cb(Vcur, "Vcur", il);
                }

                Qcur = ggml_rope_ext(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_ext(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, lctx, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur = ggml_get_rows(ctx0, cur, inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            cur = llm_build_norm(ctx0, ffn_inp, hparams,
                    model.layers[il].ffn_norm,
                    model.layers[il].ffn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "ffn_norm", il);

            cur = llm_build_ffn(ctx0, lctx, cur,
                    model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
                    NULL, NULL, NULL,
                    model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
                    NULL,
                    LLM_FFN_RELU_SQR, LLM_FFN_SEQ, cb, il);

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "ffn_out", il);

            cur = lctx.cvec.apply_to(ctx0, cur, il);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, model.output_norm_b,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
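Two details of build_nemotron above are worth flagging: the norms are built with LLM_NORM plus a bias term (LayerNorm) rather than the RMSNorm used by most llama-style graphs, and the FFN passes LLM_FFN_RELU_SQR with no gate tensor, i.e. a squared-ReLU activation applied sequentially between the up and down projections. A one-line sketch of that activation, not from the diff:

    #include <algorithm>

    // LLM_FFN_RELU_SQR: relu(x)^2, applied elementwise between ffn_up and ffn_down.
    inline float relu_squared(float x) {
        const float r = std::max(x, 0.0f);
        return r * r;
    }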
    struct ggml_cgraph * build_exaone() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);

        // mutable variable, needed during the last layer of the computation to skip unused tokens
        int32_t n_tokens = this->n_tokens;

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = build_inp_pos();

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                // rope freq factors for llama3; may return nullptr for llama2 and other models
                struct ggml_tensor * rope_factors = build_rope_factors(il);

                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                if (model.layers[il].bq) {
                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                    cb(Qcur, "Qcur", il);
                }

                struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                if (model.layers[il].bk) {
                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                    cb(Kcur, "Kcur", il);
                }

                struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                if (model.layers[il].bv) {
                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                    cb(Vcur, "Vcur", il);
                }

                Qcur = ggml_rope_ext(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_ext(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, lctx, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                n_tokens = n_outputs;
                cur = ggml_get_rows(ctx0, cur, inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            cur = llm_build_norm(ctx0, ffn_inp, hparams,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "ffn_norm", il);

            cur = llm_build_ffn(ctx0, lctx, cur,
                    model.layers[il].ffn_up, NULL, NULL,
                    model.layers[il].ffn_gate, NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
            cb(cur, "ffn_out", il);

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "ffn_out", il);

            cur = lctx.cvec.apply_to(ctx0, cur, il);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, NULL,
                LLM_NORM_RMS, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
};

static struct ggml_cgraph * llama_build_graph_defrag(llama_context & lctx, const std::vector<uint32_t> & ids) {
@ -14082,6 +14469,14 @@ static struct ggml_cgraph * llama_build_graph(
            {
                result = llm.build_jais();
            } break;
        case LLM_ARCH_NEMOTRON:
            {
                result = llm.build_nemotron();
            } break;
        case LLM_ARCH_EXAONE:
            {
                result = llm.build_exaone();
            } break;
        default:
            GGML_ABORT("fatal error");
    }

@ -17138,6 +17533,8 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
        case LLM_ARCH_OPENELM:
        case LLM_ARCH_GPTNEOX:
        case LLM_ARCH_CODESHELL:
        case LLM_ARCH_NEMOTRON:
        case LLM_ARCH_EXAONE:
            return LLAMA_ROPE_TYPE_NEOX;

        // all model arches should be listed explicitly here

@ -18774,11 +19171,11 @@ llama_token llama_token_pad(const struct llama_model * model) {
    return llama_token_pad_impl(model->vocab);
}

int32_t llama_add_bos_token(const struct llama_model * model) {
bool llama_add_bos_token(const struct llama_model * model) {
    return llama_add_bos_token_impl(model->vocab);
}

int32_t llama_add_eos_token(const struct llama_model * model) {
bool llama_add_eos_token(const struct llama_model * model) {
    return llama_add_eos_token_impl(model->vocab);
}
@ -19079,6 +19476,22 @@ static int32_t llama_chat_apply_template_internal(
        if (add_ass) {
            ss << "Assistant:";
        }
    } else if (tmpl == "exaone3" || (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]"))) {
        // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
        // EXAONE-3.0-7.8B-Instruct
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                ss << "[|system|]" << trim(message->content) << "[|endofturn|]\n";
            } else if (role == "user") {
                ss << "[|user|]" << trim(message->content) << "\n";
            } else if (role == "assistant") {
                ss << "[|assistant|]" << trim(message->content) << "[|endofturn|]\n";
            }
        }
        if (add_ass) {
            ss << "[|assistant|]";
        }
    } else {
        // template not supported
        return -1;
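For illustration, with add_ass set, the EXAONE branch above renders a short exchange roughly as follows (message contents hypothetical; note that only system and assistant turns are closed with [|endofturn|]):

    [|system|]You are a helpful assistant.[|endofturn|]
    [|user|]Hello!
    [|assistant|]Hi there![|endofturn|]
    [|user|]How are you?
    [|assistant|]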