Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-15 19:39:42 +00:00)
Merge branch 'upstream' into concedo_experimental
# Conflicts:
#	README.md
#	examples/llama-bench/llama-bench.cpp
#	examples/llama.android/llama/src/main/cpp/llama-android.cpp
#	examples/llama.android/llama/src/main/java/android/llama/cpp/LLamaAndroid.kt
#	src/llama-vocab.cpp
#	tests/test-backend-ops.cpp
Commit 96407502cd: 43 changed files with 15434 additions and 435 deletions.
@@ -48,9 +48,9 @@ KoboldCpp can now also be run on Novita AI, a newer alternative GPU cloud provid
 
 ## Obtaining a GGUF model
 - KoboldCpp uses GGUF models. They are not included with KoboldCpp, but you can download GGUF files from other places such as [TheBloke's Huggingface](https://huggingface.co/TheBloke). Search for "GGUF" on huggingface.co for plenty of compatible models in the `.gguf` format.
-- For beginners, we recommend the models [BookAdventures 8B](https://huggingface.co/KoboldAI/Llama-3.1-8B-BookAdventures-GGUF/resolve/main/Llama-3.1-8B-BookAdventures.Q4_K_S.gguf) or [Tiefighter 13B](https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_S.gguf) (larger model).
+- For beginners, we recommend the models [Airoboros Mistral 7B](https://huggingface.co/TheBloke/airoboros-mistral2.2-7B-GGUF/resolve/main/airoboros-mistral2.2-7b.Q4_K_S.gguf) (smaller and weaker) or [Tiefighter 13B](https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_S.gguf) (larger model) or [Beepo 22B](https://huggingface.co/concedo/Beepo-22B-GGUF/resolve/main/Beepo-22B-Q4_K_S.gguf) (largest and most powerful)
 - [Alternatively, you can download the tools to convert models to the GGUF format yourself here](https://kcpptools.concedo.workers.dev). Run `convert-hf-to-gguf.py` to convert them, then `quantize_gguf.exe` to quantize the result.
-- Other models for Whisper (speech recognition), Image Generation or Image Recognition [can be found on the Wiki](https://github.com/LostRuins/koboldcpp/wiki#what-models-does-koboldcpp-support-what-architectures-are-supported)
+- Other models for Whisper (speech recognition), Image Generation, Text to Speech or Image Recognition [can be found on the Wiki](https://github.com/LostRuins/koboldcpp/wiki#what-models-does-koboldcpp-support-what-architectures-are-supported)
 
 ## Improving Performance
 - **GPU Acceleration**: If you're on Windows with an Nvidia GPU you can get CUDA support out of the box using the `--usecublas` flag (Nvidia Only), or `--usevulkan` (Any GPU), make sure you select the correct .exe with CUDA support.
@@ -172,7 +172,7 @@ when you can't use the precompiled binary directly, we provide an automated buil
 
 # Where can I download AI model files?
 - The best place to get GGUF text models is huggingface. For image models, CivitAI has a good selection. Here are some to get started.
-- Text Generation: [BookAdventures 8B](https://huggingface.co/KoboldAI/Llama-3.1-8B-BookAdventures-GGUF/resolve/main/Llama-3.1-8B-BookAdventures.Q4_K_S.gguf) or [Tiefighter 13B](https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_S.gguf) (larger model).
+- Text Generation: [Airoboros Mistral 7B](https://huggingface.co/TheBloke/airoboros-mistral2.2-7B-GGUF/resolve/main/airoboros-mistral2.2-7b.Q4_K_S.gguf) (smaller and weaker) or [Tiefighter 13B](https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_S.gguf) (larger model) or [Beepo 22B](https://huggingface.co/concedo/Beepo-22B-GGUF/resolve/main/Beepo-22B-Q4_K_S.gguf) (largest and most powerful)
 - Image Generation: [Anything v3](https://huggingface.co/admruul/anything-v3.0/resolve/main/Anything-V3.0-pruned-fp16.safetensors) or [Deliberate V2](https://huggingface.co/Yntec/Deliberate2/resolve/main/Deliberate_v2.safetensors) or [Dreamshaper SDXL](https://huggingface.co/Lykon/dreamshaper-xl-v2-turbo/resolve/main/DreamShaperXL_Turbo_v2_1.safetensors)
 - Image Recognition MMproj: [Pick the correct one for your model architecture here](https://huggingface.co/koboldcpp/mmproj/tree/main)
 - Speech Recognition: [Whisper models for Speech-To-Text](https://huggingface.co/koboldcpp/whisper/tree/main)
@@ -377,6 +377,30 @@ static std::vector<ggml_backend_dev_t> parse_device_list(const std::string & val
     return devices;
 }
 
+static void add_rpc_devices(std::string servers) {
+    auto rpc_servers = string_split<std::string>(servers, ',');
+    if (rpc_servers.empty()) {
+        throw std::invalid_argument("no RPC servers specified");
+    }
+    ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC");
+    if (!rpc_reg) {
+        throw std::invalid_argument("failed to find RPC backend");
+    }
+    typedef ggml_backend_dev_t (*ggml_backend_rpc_add_device_t)(const char * endpoint);
+    ggml_backend_rpc_add_device_t ggml_backend_rpc_add_device_fn = (ggml_backend_rpc_add_device_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_device");
+    if (!ggml_backend_rpc_add_device_fn) {
+        throw std::invalid_argument("failed to find RPC device add function");
+    }
+    for (const auto & server : rpc_servers) {
+        ggml_backend_dev_t dev = ggml_backend_rpc_add_device_fn(server.c_str());
+        if (dev) {
+            ggml_backend_device_register(dev);
+        } else {
+            throw std::invalid_argument("failed to register RPC device");
+        }
+    }
+}
+
 bool common_params_parse(int argc, char ** argv, common_params & params, llama_example ex, void(*print_usage)(int, char **)) {
     auto ctx_arg = common_params_parser_init(params, ex, print_usage);
     const common_params params_org = ctx_arg.params; // the example can modify the default params
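The helper above resolves an optional backend entry point by name instead of linking against the RPC backend directly. Below is a self-contained analogue of that proc-address pattern; the names, types, and the endpoint string are illustrative stand-ins, not ggml's actual API surface.

```cpp
// Sketch of the "look up an optional backend symbol by name" pattern used by
// add_rpc_devices() above. Everything here is a simplified stand-in.
#include <cstdio>
#include <cstring>

typedef int device_t;
typedef device_t (*add_device_fn_t)(const char * endpoint);

static device_t rpc_add_device(const char * endpoint) {
    std::printf("registering RPC device at %s\n", endpoint);
    return 42; // dummy device handle
}

// Stand-in for ggml_backend_reg_get_proc_address(): resolve optional symbols by name.
static void * reg_get_proc_address(const char * name) {
    if (std::strcmp(name, "rpc_add_device") == 0) {
        return (void *) rpc_add_device;
    }
    return nullptr;
}

int main() {
    auto fn = (add_device_fn_t) reg_get_proc_address("rpc_add_device");
    if (!fn) {
        std::fprintf(stderr, "backend does not provide rpc_add_device\n");
        return 1;
    }
    const device_t dev = fn("192.168.1.10:50052"); // placeholder endpoint
    std::printf("got device handle %d\n", dev);
    return 0;
}
```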
@@ -1386,7 +1410,8 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         {"--rpc"}, "SERVERS",
         "comma separated list of RPC servers",
         [](common_params & params, const std::string & value) {
-            params.rpc_servers = value;
+            add_rpc_devices(value);
+            GGML_UNUSED(params);
         }
     ).set_env("LLAMA_ARG_RPC"));
 }
@@ -1045,7 +1045,6 @@ struct llama_model_params common_model_params_to_llama(common_params & params) {
     if (params.n_gpu_layers != -1) {
         mparams.n_gpu_layers = params.n_gpu_layers;
     }
-    mparams.rpc_servers = params.rpc_servers.c_str();
     mparams.main_gpu = params.main_gpu;
     mparams.split_mode = params.split_mode;
     mparams.tensor_split = params.tensor_split;
@@ -244,7 +244,6 @@ struct common_params {
     std::string lookup_cache_static = ""; // path of static ngram cache file for lookup decoding // NOLINT
     std::string lookup_cache_dynamic = ""; // path of dynamic ngram cache file for lookup decoding // NOLINT
     std::string logits_file = ""; // file for saving *all* logits // NOLINT
-    std::string rpc_servers = ""; // comma separated list of RPC servers // NOLINT
 
     std::vector<std::string> in_files; // all input files
     std::vector<std::string> antiprompt; // strings upon which more user input is prompted (a.k.a. reverse prompts)
@@ -2882,6 +2882,66 @@ class InternLM2Model(Model):
         return [(self.map_tensor_name(name), data_torch)]
 
 
+@Model.register("InternLM3ForCausalLM")
+class InternLM3Model(Model):
+    model_arch = gguf.MODEL_ARCH.LLAMA
+
+    def set_vocab(self):
+        tokens, scores, toktypes = self._create_vocab_sentencepiece()
+
+        self.gguf_writer.add_tokenizer_model("llama")
+        self.gguf_writer.add_tokenizer_pre("default")
+        self.gguf_writer.add_token_list(tokens)
+        self.gguf_writer.add_token_scores(scores)
+        self.gguf_writer.add_token_types(toktypes)
+
+        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
+
+        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
+        if tokenizer_config_file.is_file():
+            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
+                tokenizer_config_json = json.load(f)
+                if "add_prefix_space" in tokenizer_config_json:
+                    self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])
+
+                if "added_tokens_decoder" in tokenizer_config_json:
+                    for token_id, token_data in tokenizer_config_json["added_tokens_decoder"].items():
+                        if token_data.get("special"):
+                            token_id = int(token_id)
+                            token = token_data["content"]
+                            special_vocab._set_special_token(token, token_id)
+                            # update eos token
+                            if token == '<|im_end|>' and "eos" in special_vocab.special_token_ids:
+                                special_vocab.special_token_ids["eos"] = token_id
+
+        special_vocab.add_to_gguf(self.gguf_writer)
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        hparams = self.hparams
+        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
+
+        if "head_dim" in hparams:
+            rope_dim = hparams["head_dim"]
+        else:
+            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
+        self.gguf_writer.add_rope_dimension_count(rope_dim)
+
+        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
+            if self.hparams["rope_scaling"].get("type") == "linear" or self.hparams["rope_scaling"].get("rope_type") == "linear":
+                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
+                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
+
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        n_head = self.hparams["num_attention_heads"]
+        n_kv_head = self.hparams.get("num_key_value_heads")
+        if name.endswith(("q_proj.weight", "q_proj.bias")):
+            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
+        if name.endswith(("k_proj.weight", "k_proj.bias")):
+            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)
+        return [(self.map_tensor_name(name), data_torch)]
+
+
 @Model.register("BertModel", "BertForMaskedLM", "CamembertModel")
 class BertModel(Model):
     model_arch = gguf.MODEL_ARCH.BERT
@@ -203,6 +203,8 @@ extern "C" {
     // Backend registry
     //
 
+    GGML_API void ggml_backend_device_register(ggml_backend_dev_t device);
+
     // Backend (reg) enumeration
     GGML_API size_t ggml_backend_reg_count(void);
     GGML_API ggml_backend_reg_t ggml_backend_reg_get(size_t index);
@@ -1397,16 +1397,20 @@ extern "C" {
             float                 scale,
             float                 max_bias);
 
-    GGML_API struct ggml_tensor * ggml_soft_max_back(
+    GGML_API struct ggml_tensor * ggml_soft_max_ext_back(
             struct ggml_context * ctx,
             struct ggml_tensor  * a,
-            struct ggml_tensor  * b);
+            struct ggml_tensor  * b,
+            float                 scale,
+            float                 max_bias);
 
     // in-place, returns view(a)
-    GGML_API struct ggml_tensor * ggml_soft_max_back_inplace(
+    GGML_API struct ggml_tensor * ggml_soft_max_ext_back_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
-           struct ggml_tensor  * b);
+           struct ggml_tensor  * b,
+           float                 scale,
+           float                 max_bias);
 
     // rotary position embedding
     // if (mode & 1) - skip n_past elements (NOT SUPPORTED)
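For orientation, the extra `scale` argument corresponds to the scaled-softmax gradient that the CPU implementation later in this commit computes (the `max_bias`/ALiBi term is asserted to be zero there). A sketch of the formula, with y the forward softmax output and dy the incoming gradient:

$$dx_i = \text{scale}\cdot y_i\Big(dy_i - \sum_j y_j\,dy_j\Big), \qquad y = \operatorname{softmax}(\text{scale}\cdot x).$$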
@@ -37,6 +37,7 @@ static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml
     return true;
 }
 
+// ops that return true for this function must not use restrict pointers for their backend implementations
 static bool ggml_op_can_inplace(enum ggml_op op) {
     switch (op) {
         case GGML_OP_SCALE:
@@ -52,8 +53,12 @@ static bool ggml_op_can_inplace(enum ggml_op op) {
         case GGML_OP_LOG:
         case GGML_OP_UNARY:
         case GGML_OP_ROPE:
+        case GGML_OP_ROPE_BACK:
+        case GGML_OP_SILU_BACK:
         case GGML_OP_RMS_NORM:
+        case GGML_OP_RMS_NORM_BACK:
         case GGML_OP_SOFT_MAX:
+        case GGML_OP_SOFT_MAX_BACK:
             return true;
 
         default:
@@ -208,7 +208,6 @@ extern "C" {
 
     // Internal backend registry API
     GGML_API void ggml_backend_register(ggml_backend_reg_t reg);
-    GGML_API void ggml_backend_device_register(ggml_backend_dev_t device);
 
     // Add backend dynamic loading support to the backend
 
@@ -5574,7 +5574,88 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r
 
     uint32_t utmp[4];
 
-#ifdef __ARM_NEON
+#ifdef __ARM_FEATURE_SVE
+    float sumf = 0;
+    for (int i = 0; i < nb; ++i) {
+
+        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+        const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+        const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
+
+        memcpy(utmp, x[i].scales, K_SCALE_SIZE);
+
+        uint32x2_t mins8 = { 0 };
+        mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0);
+        mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1);
+
+        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+        utmp[0] &= kmask1;
+
+        const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8)));
+        const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
+                                         vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
+        sumf -= dmin * vaddvq_s32(prod);
+
+        const uint8_t * scales = (const uint8_t *)utmp;
+
+        const uint8_t * restrict q4 = x[i].qs;
+        const int8_t  * restrict q8 = y[i].qs;
+
+        const int vector_length = ggml_cpu_get_sve_cnt()*8;
+        const svuint8_t m4b = svdup_n_u8(0xf);
+        const svint32_t mzero = svdup_n_s32(0);
+        svint32_t sumi1 = svdup_n_s32(0);
+        svint32_t sumi1_1 = svdup_n_s32(0);
+        svint32_t sumi1_2 = svdup_n_s32(0);
+        svint32_t sumi2 = svdup_n_s32(0);
+        svint32_t sumi2_1 = svdup_n_s32(0);
+        svint32_t sumi2_2 = svdup_n_s32(0);
+        switch (vector_length) {
+            case 128:
+                {
+                    for (int j = 0; j < QK_K/64; ++j) {
+                        svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), m4b));
+                        svint8_t q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16;
+                        sumi1_1 = svmla_n_s32_x(svptrue_b32(), sumi1_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]);
+                        q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4+16), m4b));
+                        q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16;
+                        sumi1_2 = svmla_n_s32_x(svptrue_b32(), sumi1_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]);
+
+                        q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), 4));
+                        q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16;
+                        sumi2_1 = svmla_n_s32_x(svptrue_b32(), sumi2_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]);
+                        q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4+16), 4));
+                        q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16;
+                        sumi2_2 = svmla_n_s32_x(svptrue_b32(), sumi2_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]);
+                        q4 += 32;
+                    }
+                    sumi1 = svadd_s32_x(svptrue_b32(), sumi1_1, sumi1_2);
+                    sumi2 = svadd_s32_x(svptrue_b32(), sumi2_1, sumi2_2);
+                    sumf += d * (svaddv_s32(svptrue_b32(), svadd_s32_x(svptrue_b32(), sumi1, sumi2)));
+                } break;
+            case 256:
+            case 512:
+                {
+                    for (int j = 0; j < QK_K/64; ++j) {
+                        const svuint8_t q4bits = svld1_u8(svptrue_pat_b8(SV_VL32), q4); q4 += 32;
+                        svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_pat_b8(SV_VL32), q4bits, m4b));
+                        svint8_t q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); q8 += 32;
+                        sumi1 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]);
+
+                        q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q4bits, 4));
+                        q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); q8 += 32;
+                        sumi2 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]);
+                    }
+                    sumf += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), sumi1, sumi2)));
+                } break;
+            default:
+                assert(false && "Unsupported vector length");
+                break;
+        }
+    }
+    *s = sumf;
+#elif __ARM_NEON
     const uint8x16_t m4b = vdupq_n_u8(0xf);
     const int32x4_t mzero = vdupq_n_s32(0);
 
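The SVE kernel above vectorizes the standard Q4_K x Q8_K superblock reduction. Below is a standalone scalar model of that arithmetic, with scales, mins, and quants passed as plain arrays; the packed ggml layout (6-bit scales, interleaved nibbles) is deliberately not reproduced, so this is an illustration of the structure only.

```cpp
// Scalar model of one superblock (256 weights, 8 sub-blocks of 32) of the
// Q4_K * Q8_K dot product: w ~= d*scale_j*q - dmin*min_j per sub-block.
#include <cstdint>
#include <cstdio>

static float dot_q4k_like(float d, float dmin,
                          const uint8_t scales[8], const uint8_t mins[8],
                          const uint8_t q4[256],   // 4-bit values, one per byte, 0..15
                          float yd, const int8_t q8[256], const int16_t bsums[8]) {
    int32_t sumi = 0;
    for (int j = 0; j < 8; ++j) {
        int32_t dot = 0;
        for (int k = 0; k < 32; ++k) {
            dot += (int32_t) q4[32*j + k] * q8[32*j + k];
        }
        sumi += scales[j] * dot;
    }
    int32_t summ = 0;
    for (int j = 0; j < 8; ++j) {
        summ += mins[j] * bsums[j];  // bsums[j] = sum of the 32 q8 values in sub-block j
    }
    return d*yd*sumi - dmin*yd*summ; // same "d*sum - dmin*bsum" split as the kernel
}

int main() {
    uint8_t scales[8] = {1,1,1,1,1,1,1,1}, mins[8] = {0};
    uint8_t q4[256] = {0};
    int8_t  q8[256] = {0};
    int16_t bsums[8] = {0};
    std::printf("%f\n", dot_q4k_like(1.0f, 1.0f, scales, mins, q4, 1.0f, q8, bsums));
    return 0;
}
```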
@@ -3972,6 +3972,57 @@ static void ggml_compute_forward_dup_bytes(
     }
 }
 
+static void ggml_compute_forward_dup_q(
+        const struct ggml_compute_params * params,
+        struct ggml_tensor * dst) {
+
+    const struct ggml_tensor * src0 = dst->src[0];
+    const struct ggml_tensor * src1 = dst->src[1];
+
+    GGML_TENSOR_BINARY_OP_LOCALS
+
+    const enum ggml_type type = src0->type;
+    ggml_to_float_t const dequantize_row_q = ggml_get_type_traits(type)->to_float;
+
+    size_t qk = ggml_blck_size(type);
+    const int64_t nr = ggml_nelements(src1) / qk;
+
+    // destination must be contiguous in the first dimension
+    GGML_ASSERT(nb10 == ggml_type_size(dst->type));
+    // must either have first dimension large enough to hold a row, or fully contiguous
+    GGML_ASSERT((ne10 % qk) == 0 || ggml_is_contiguous(dst));
+
+    const int ith = params->ith;
+    const int nth = params->nth;
+
+    const int dr = (nr + nth - 1)/nth;
+
+    // row range for this thread
+    const int ir0 = dr*ith;
+    const int ir1 = MIN(ir0 + dr, nr);
+
+    for (int64_t ir = ir0; ir < ir1; ++ir) {
+
+        uint32_t i = ir * qk;
+
+        const int64_t i03 = i/(ne00 * ne01 * ne02);
+        const int64_t i02 = (i - i03*ne00*ne01*ne02 )/ (ne00*ne01);
+        const int64_t i01 = (i - i03*ne00*ne01*ne02 - i02*ne01*ne00) / ne00;
+        const int64_t i00 = i - i03*ne00*ne01*ne02 - i02*ne01*ne00 - i01*ne00;
+        const int64_t x_offset = (i00/qk)*nb00 + i01*nb01 + i02*nb02 + i03 * nb03;
+
+        const int64_t i13 = i/(ne10 * ne11 * ne12);
+        const int64_t i12 = (i - i13*ne10*ne11*ne12) / (ne10*ne11);
+        const int64_t i11 = (i - i13*ne10*ne11*ne12 - i12*ne10*ne11) / ne10;
+        const int64_t i10 = i - i13*ne10*ne11*ne12 - i12*ne10*ne11 - i11*ne10;
+        const int64_t dst_offset = i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13;
+
+        dequantize_row_q(
+                (const void *) ((char *) src0->data + x_offset),
+                (float *) ((char *) dst->data + dst_offset), qk);
+    }
+}
+
 static void ggml_compute_forward_dup(
         const struct ggml_compute_params * params,
         struct ggml_tensor * dst) {
@@ -3998,6 +4049,10 @@ static void ggml_compute_forward_dup(
             } break;
         default:
             {
+                if (ggml_is_quantized(src0->type) && dst->type == GGML_TYPE_F32) {
+                    ggml_compute_forward_dup_q(params, dst);
+                    break;
+                }
                 GGML_ABORT("fatal error");
             }
     }
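The new dup path relies on decomposing a flat element index into 4D coordinates and then into a byte offset via per-dimension strides. A minimal, self-contained illustration of that index math (the extents and strides are made-up numbers, not ggml values):

```cpp
// Flat index -> (i0, i1, i2, i3) -> byte offset, as in ggml_compute_forward_dup_q above.
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t ne[4] = {8, 4, 3, 2};              // example extents (hypothetical)
    const int64_t nb[4] = {4, 4*8, 4*8*4, 4*8*4*3};  // contiguous f32 strides in bytes

    const int64_t i  = 100;                          // flat index to decompose
    const int64_t i3 = i / (ne[0]*ne[1]*ne[2]);
    const int64_t i2 = (i - i3*ne[0]*ne[1]*ne[2]) / (ne[0]*ne[1]);
    const int64_t i1 = (i - i3*ne[0]*ne[1]*ne[2] - i2*ne[0]*ne[1]) / ne[0];
    const int64_t i0 =  i - i3*ne[0]*ne[1]*ne[2] - i2*ne[0]*ne[1] - i1*ne[0];

    const int64_t offset = i0*nb[0] + i1*nb[1] + i2*nb[2] + i3*nb[3];
    std::printf("i=%lld -> (%lld,%lld,%lld,%lld), byte offset %lld\n",
                (long long) i, (long long) i0, (long long) i1,
                (long long) i2, (long long) i3, (long long) offset);
    return 0;
}
```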
@@ -6720,20 +6775,20 @@ static void ggml_compute_forward_silu_back_f32(
         const struct ggml_compute_params * params,
         struct ggml_tensor * dst) {
 
-    const struct ggml_tensor * src0 = dst->src[0];
-    const struct ggml_tensor * grad = dst->src[1];
+    const struct ggml_tensor * grad = dst->src[0];
+    const struct ggml_tensor * src1 = dst->src[1];
 
     assert(ggml_is_contiguous_1(grad));
-    assert(ggml_is_contiguous_1(src0));
+    assert(ggml_is_contiguous_1(src1));
     assert(ggml_is_contiguous_1(dst));
-    assert(ggml_are_same_shape(src0, dst));
-    assert(ggml_are_same_shape(src0, grad));
+    assert(ggml_are_same_shape(src1, dst));
+    assert(ggml_are_same_shape(src1, grad));
 
     const int ith = params->ith;
     const int nth = params->nth;
 
-    const int nc = src0->ne[0];
-    const int nr = ggml_nrows(src0);
+    const int nc = src1->ne[0];
+    const int nr = ggml_nrows(src1);
 
     // rows per thread
     const int dr = (nr + nth - 1)/nth;
@@ -6745,7 +6800,7 @@ static void ggml_compute_forward_silu_back_f32(
     for (int i1 = ir0; i1 < ir1; i1++) {
         ggml_vec_silu_backward_f32(nc,
                 (float *) ((char *) dst->data + i1*( dst->nb[1])),
-                (float *) ((char *) src0->data + i1*(src0->nb[1])),
+                (float *) ((char *) src1->data + i1*(src1->nb[1])),
                 (float *) ((char *) grad->data + i1*(grad->nb[1])));
 
 #ifndef NDEBUG
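For reference, the per-element derivative that `ggml_vec_silu_backward_f32` applies to the forward input x (now carried in src1) is the standard SiLU gradient; written out as a sketch:

$$\frac{d}{dx}\big(x\,\sigma(x)\big) = \sigma(x)\big(1 + x\,(1-\sigma(x))\big), \qquad dx_i = dy_i \cdot \sigma(x_i)\big(1 + x_i(1-\sigma(x_i))\big).$$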
@@ -6924,7 +6979,7 @@ static void ggml_compute_forward_norm_f32(
     float eps;
     memcpy(&eps, dst->op_params, sizeof(float));
 
-    GGML_ASSERT(eps > 0.0f);
+    GGML_ASSERT(eps >= 0.0f);
 
     // TODO: optimize
     for (int64_t i03 = 0; i03 < ne03; i03++) {
@@ -6995,7 +7050,7 @@ static void ggml_compute_forward_rms_norm_f32(
     float eps;
     memcpy(&eps, dst->op_params, sizeof(float));
 
-    GGML_ASSERT(eps > 0.0f);
+    GGML_ASSERT(eps >= 0.0f);
 
     // TODO: optimize
     for (int64_t i03 = 0; i03 < ne03; i03++) {
@@ -7047,12 +7102,13 @@ static void ggml_compute_forward_rms_norm_back_f32(
         const struct ggml_compute_params * params,
         struct ggml_tensor * dst) {
 
-    const struct ggml_tensor * src0 = dst->src[0];
-    const struct ggml_tensor * src1 = dst->src[1];
+    const struct ggml_tensor * src0 = dst->src[0]; // gradients from forward pass output
+    const struct ggml_tensor * src1 = dst->src[1]; // src1 from forward pass
 
     GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1));
 
     GGML_ASSERT(src0->nb[0] == sizeof(float));
+    GGML_ASSERT(src1->nb[0] == sizeof(float));
 
     const int ith = params->ith;
     const int nth = params->nth;
@@ -7071,8 +7127,8 @@ static void ggml_compute_forward_rms_norm_back_f32(
             const int64_t i12 = i02;
             const int64_t i13 = i03;
 
-            const float * x  = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
-            const float * dz = (float *) ((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13);
+            const float * dz = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
+            const float * x  = (float *) ((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13);
 
             ggml_float sum_xx  = 0.0;
             ggml_float sum_xdz = 0.0;
@@ -7095,9 +7151,9 @@ static void ggml_compute_forward_rms_norm_back_f32(
             {
                 // z = rms_norm(x)
                 //
-                // rms_norm(src0) =
+                // rms_norm(src1) =
                 //     scale(
-                //         src0,
+                //         src1,
                 //         div(
                 //             1,
                 //             sqrt(
@@ -7105,13 +7161,13 @@ static void ggml_compute_forward_rms_norm_back_f32(
                 //                 scale(
                 //                     sum(
                 //                         sqr(
-                //                             src0)),
+                //                             src1)),
                 //                     (1.0/N)),
                 //                 eps))));
 
                 // postorder:
                 // ## op    args         grad
-                // 00 param src0         grad[#00]
+                // 00 param src1         grad[#00]
                 // 01 const 1
                 // 02 sqr   (#00)        grad[#02]
                 // 03 sum   (#02)        grad[#03]
@@ -7188,6 +7244,7 @@ static void ggml_compute_forward_rms_norm_back_f32(
             // dx := scale(dx, rrms)
             float * dx = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
 
+            // dx[i00] = (x*(-sum_xdz/sum_eps) + dz) / sqrtf(mean_eps)
            ggml_vec_cpy_f32  (ne00, dx, x);
            // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps);
            ggml_vec_scale_f32(ne00, dx, (float)(-sum_xdz)/sum_eps);
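The new comment `dx[i00] = (x*(-sum_xdz/sum_eps) + dz) / sqrtf(mean_eps)` restates the RMS-norm backward formula; as a sketch in conventional notation, with sum_eps = sum of x squared plus N times eps and mean_eps = sum_eps/N:

$$dx_j \;=\; \frac{1}{\sqrt{\tfrac{1}{N}\sum_i x_i^2 + \varepsilon}}\left(dz_j \;-\; x_j\,\frac{\sum_i x_i\,dz_i}{\sum_i x_i^2 + N\varepsilon}\right).$$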
@@ -7788,12 +7845,13 @@ static void ggml_compute_forward_out_prod_f32(
     const int ith = params->ith;
     const int nth = params->nth;
 
     GGML_ASSERT(ne0 == ne00);
     GGML_ASSERT(ne1 == ne10);
-    GGML_ASSERT(ne2 == ne02);
-    GGML_ASSERT(ne02 == ne12);
-    GGML_ASSERT(ne3 == ne13);
-    GGML_ASSERT(ne03 == ne13);
+    GGML_ASSERT(ne2 == ne12);
+    GGML_ASSERT(ne3 == ne13);
+
+    GGML_ASSERT(ne2 % ne02 == 0);
+    GGML_ASSERT(ne3 % ne03 == 0);
 
     // we don't support permuted src0 or src1
     GGML_ASSERT(nb00 == sizeof(float));
@@ -7835,6 +7893,10 @@ static void ggml_compute_forward_out_prod_f32(
     const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32);
     const int64_t blck_1 = 16;
 
+    // dps == dst per src0, used for group query attention
+    const int64_t dps2 = ne2 / ne02;
+    const int64_t dps3 = ne3 / ne03;
+
     for (int64_t bir = ir0; bir < ir1; bir += blck_1) {
         const int64_t bir1 = MIN(bir + blck_1, ir1);
         for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) {
@@ -7845,8 +7907,8 @@ static void ggml_compute_forward_out_prod_f32(
         const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
         const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);
 
-        const int64_t i02 = i2;
-        const int64_t i03 = i3;
+        const int64_t i02 = i2 / dps2;
+        const int64_t i03 = i3 / dps3;
 
         //const int64_t i10 = i1;
         const int64_t i12 = i2;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// ggml_compute_forward_soft_max_back
|
// ggml_compute_forward_soft_max_ext_back
|
||||||
|
|
||||||
static void ggml_compute_forward_soft_max_back_f32(
|
static void ggml_compute_forward_soft_max_ext_back_f32(
|
||||||
const struct ggml_compute_params * params,
|
const struct ggml_compute_params * params,
|
||||||
struct ggml_tensor * dst) {
|
struct ggml_tensor * dst) {
|
||||||
|
|
||||||
|
@ -8959,6 +9021,14 @@ static void ggml_compute_forward_soft_max_back_f32(
|
||||||
GGML_ASSERT(ggml_are_same_shape(src0, dst));
|
GGML_ASSERT(ggml_are_same_shape(src0, dst));
|
||||||
GGML_ASSERT(ggml_are_same_shape(src1, dst));
|
GGML_ASSERT(ggml_are_same_shape(src1, dst));
|
||||||
|
|
||||||
|
float scale = 1.0f;
|
||||||
|
float max_bias = 0.0f;
|
||||||
|
|
||||||
|
memcpy(&scale, (const float *) dst->op_params + 0, sizeof(float));
|
||||||
|
memcpy(&max_bias, (const float *) dst->op_params + 1, sizeof(float));
|
||||||
|
|
||||||
|
GGML_ASSERT(max_bias == 0.0f);
|
||||||
|
|
||||||
// TODO: handle transposed/permuted matrices
|
// TODO: handle transposed/permuted matrices
|
||||||
|
|
||||||
const int ith = params->ith;
|
const int ith = params->ith;
|
||||||
|
@ -9007,10 +9077,11 @@ static void ggml_compute_forward_soft_max_back_f32(
|
||||||
|
|
||||||
// linear runtime, no additional memory
|
// linear runtime, no additional memory
|
||||||
float dot_y_dy = 0;
|
float dot_y_dy = 0;
|
||||||
ggml_vec_dot_f32 (nc, &dot_y_dy, 0, y, 0, dy, 0, 1);
|
ggml_vec_dot_f32 (nc, &dot_y_dy, 0, y, 0, dy, 0, 1);
|
||||||
ggml_vec_cpy_f32 (nc, dx, dy);
|
ggml_vec_cpy_f32 (nc, dx, dy);
|
||||||
ggml_vec_acc1_f32(nc, dx, -dot_y_dy);
|
ggml_vec_acc1_f32 (nc, dx, -dot_y_dy);
|
||||||
ggml_vec_mul_f32 (nc, dx, dx, y);
|
ggml_vec_mul_f32 (nc, dx, dx, y);
|
||||||
|
ggml_vec_scale_f32(nc, dx, scale);
|
||||||
|
|
||||||
#ifndef NDEBUG
|
#ifndef NDEBUG
|
||||||
for (int i = 0; i < nc; ++i) {
|
for (int i = 0; i < nc; ++i) {
|
||||||
|
@ -9021,7 +9092,7 @@ static void ggml_compute_forward_soft_max_back_f32(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ggml_compute_forward_soft_max_back(
|
static void ggml_compute_forward_soft_max_ext_back(
|
||||||
const struct ggml_compute_params * params,
|
const struct ggml_compute_params * params,
|
||||||
struct ggml_tensor * dst) {
|
struct ggml_tensor * dst) {
|
||||||
|
|
||||||
|
@ -9030,7 +9101,7 @@ static void ggml_compute_forward_soft_max_back(
|
||||||
switch (src0->type) {
|
switch (src0->type) {
|
||||||
case GGML_TYPE_F32:
|
case GGML_TYPE_F32:
|
||||||
{
|
{
|
||||||
ggml_compute_forward_soft_max_back_f32(params, dst);
|
ggml_compute_forward_soft_max_ext_back_f32(params, dst);
|
||||||
} break;
|
} break;
|
||||||
default:
|
default:
|
||||||
{
|
{
|
||||||
|
@ -10023,9 +10094,10 @@ static void ggml_compute_forward_im2col_back_f32(
|
||||||
const struct ggml_compute_params * params,
|
const struct ggml_compute_params * params,
|
||||||
struct ggml_tensor * dst) {
|
struct ggml_tensor * dst) {
|
||||||
|
|
||||||
const struct ggml_tensor * src0 = dst->src[0];
|
const struct ggml_tensor * src0 = dst->src[0]; // gradients of forward pass output
|
||||||
const struct ggml_tensor * src1 = dst->src[1];
|
const struct ggml_tensor * src1 = dst->src[1]; // convolution kernel
|
||||||
|
|
||||||
|
GGML_ASSERT(src0->type == GGML_TYPE_F32);
|
||||||
GGML_ASSERT(src1->type == GGML_TYPE_F32);
|
GGML_ASSERT(src1->type == GGML_TYPE_F32);
|
||||||
GGML_ASSERT( dst->type == GGML_TYPE_F32);
|
GGML_ASSERT( dst->type == GGML_TYPE_F32);
|
||||||
|
|
||||||
|
@ -10047,11 +10119,11 @@ static void ggml_compute_forward_im2col_back_f32(
|
||||||
const int64_t IH = is_2D ? ne1 : 1;
|
const int64_t IH = is_2D ? ne1 : 1;
|
||||||
const int64_t IW = ne0;
|
const int64_t IW = ne0;
|
||||||
|
|
||||||
const int64_t KH = is_2D ? ne01 : 1;
|
const int64_t KH = is_2D ? ne11 : 1;
|
||||||
const int64_t KW = ne00;
|
const int64_t KW = ne10;
|
||||||
|
|
||||||
const int64_t OH = is_2D ? ne12 : 1;
|
const int64_t OH = is_2D ? ne02 : 1;
|
||||||
const int64_t OW = ne11;
|
const int64_t OW = ne01;
|
||||||
|
|
||||||
int ofs0 = is_2D ? nb3 : nb2;
|
int ofs0 = is_2D ? nb3 : nb2;
|
||||||
int ofs1 = is_2D ? nb2 : nb1;
|
int ofs1 = is_2D ? nb2 : nb1;
|
||||||
|
@ -10097,9 +10169,9 @@ static void ggml_compute_forward_im2col_back_f32(
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
const float * const src_data = (const float *) src1->data
|
const float * const grad_in = (const float *) src0->data
|
||||||
+ (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW]
|
+ (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW]
|
||||||
grad += src_data[iic*(KH*KW) + ikh*KW + ikw];
|
grad += grad_in[iic*(KH*KW) + ikh*KW + ikw];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
float * dst_data = (float *)((char *) wdata + (in*ofs0 + iic*ofs1)); // [IH, IW]
|
float * dst_data = (float *)((char *) wdata + (in*ofs0 + iic*ofs1)); // [IH, IW]
|
||||||
|
@@ -12522,22 +12594,22 @@ static void ggml_compute_forward_cross_entropy_loss_back_f32(
         const struct ggml_compute_params * params,
         struct ggml_tensor * dst) {
 
-    const struct ggml_tensor * src0 = dst->src[0];
-    const struct ggml_tensor * src1 = dst->src[1];
-    const struct ggml_tensor * opt0 = dst->src[2];
+    const struct ggml_tensor * grad  = dst->src[0]; // gradient of forward pass output
+    const struct ggml_tensor * src0f = dst->src[1]; // src0 of forward pass
+    const struct ggml_tensor * src1f = dst->src[2]; // src1 of forward pass
 
     GGML_ASSERT(ggml_is_contiguous(dst));
-    GGML_ASSERT(ggml_is_contiguous(src0));
-    GGML_ASSERT(ggml_is_contiguous(src1));
-    GGML_ASSERT(ggml_is_contiguous(opt0));
-    GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
+    GGML_ASSERT(ggml_is_contiguous(src0f));
+    GGML_ASSERT(ggml_is_contiguous(src1f));
+    GGML_ASSERT(ggml_is_contiguous(grad));
+    GGML_ASSERT(ggml_are_same_shape(src0f, src1f) && ggml_are_same_shape(src0f, dst));
 
     const int64_t ith = params->ith;
     const int64_t nth = params->nth;
 
     // TODO: handle transposed/permuted matrices
-    const int64_t nc = src0->ne[0];
-    const int64_t nr = ggml_nrows(src0);
+    const int64_t nc = src0f->ne[0];
+    const int64_t nr = ggml_nrows(src0f);
 
     // rows per thread
     const int64_t dr = (nr + nth - 1)/nth;
@@ -12546,12 +12618,12 @@ static void ggml_compute_forward_cross_entropy_loss_back_f32(
     const int64_t ir0 = dr*ith;
     const int64_t ir1 = MIN(ir0 + dr, nr);
 
-    const float d_by_nr = ((const float *) opt0->data)[0] / (float) nr;
+    const float d_by_nr = ((const float *) grad->data)[0] / (float) nr;
 
     for (int64_t i1 = ir0; i1 < ir1; i1++) {
         float * ds0 = (float *)((char *) dst->data + i1*dst->nb[1]);
-        float * s0 = (float *)((char *) src0->data + i1*src0->nb[1]);
-        float * s1 = (float *)((char *) src1->data + i1*src1->nb[1]);
+        const float * s0 = (const float *)((const char *) src0f->data + i1*src0f->nb[1]);
+        const float * s1 = (const float *)((const char *) src1f->data + i1*src1f->nb[1]);
 
 #ifndef NDEBUG
         for (int64_t i = 0; i < nc; ++i) {
@@ -12564,11 +12636,11 @@ static void ggml_compute_forward_cross_entropy_loss_back_f32(
         // soft_max
         float max = -INFINITY;
         ggml_vec_max_f32(nc, &max, s0);
-        ggml_float sum = ggml_vec_soft_max_f32(nc, ds0, s0, max);
+        const ggml_float sum = ggml_vec_soft_max_f32(nc, ds0, s0, max);
         assert(sum > 0.0);
         ggml_vec_scale_f32(nc, ds0, 1.0/sum);
 
-        // grad(src0) = (softmax(src0) - src1) * grad(cross_entropy_loss(src0, src1)) / nr
+        // grad(src0f) = (softmax(src0f) - src1f) * grad(cross_entropy_loss(src0f, src1f)) / nr
         ggml_vec_sub_f32(nc, ds0, ds0, s1);
         ggml_vec_scale_f32(nc, ds0, d_by_nr);
 
|
||||||
} break;
|
} break;
|
||||||
case GGML_OP_SOFT_MAX_BACK:
|
case GGML_OP_SOFT_MAX_BACK:
|
||||||
{
|
{
|
||||||
ggml_compute_forward_soft_max_back(params, tensor);
|
ggml_compute_forward_soft_max_ext_back(params, tensor);
|
||||||
} break;
|
} break;
|
||||||
case GGML_OP_ROPE:
|
case GGML_OP_ROPE:
|
||||||
{
|
{
|
||||||
|
|
|
@@ -403,6 +403,16 @@ static bool ggml_backend_cpu_device_supports_op(ggml_backend_dev_t dev, const st
                 op->type != GGML_TYPE_IQ1_M; // missing type_traits.from_float
         case GGML_OP_MUL_MAT:
             return src1->type == GGML_TYPE_F32 || src1->type == ggml_get_type_traits_cpu(src0->type)->vec_dot_type;
+        case GGML_OP_SOFT_MAX_BACK: {
+            if (op->src[0]->type != GGML_TYPE_F32 || op->src[1]->type != GGML_TYPE_F32) {
+                return false;
+            }
+            float max_bias = 0.0f;
+
+            memcpy(&max_bias, (const float *) op->op_params + 1, sizeof(float));
+
+            return max_bias == 0.0f;
+        }
         case GGML_OP_IM2COL_BACK:
             return src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32;
         case GGML_OP_OUT_PROD:
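The check above reads a float operator parameter back out of the op_params storage with memcpy. A standalone illustration of that round trip; the int32 array below is a stand-in for the tensor's op_params field, not ggml's actual struct:

```cpp
// Float parameters are stored into an int32-typed parameter array via memcpy and
// read back the same way, as in the SOFT_MAX_BACK support check above.
#include <cstdint>
#include <cstring>
#include <cstdio>

int main() {
    int32_t op_params[4] = {0};            // stand-in for a tensor's op_params
    const float scale = 0.125f, max_bias = 0.0f;
    std::memcpy((float *) op_params + 0, &scale,    sizeof(float));
    std::memcpy((float *) op_params + 1, &max_bias, sizeof(float));

    float read_bias = 1.0f;
    std::memcpy(&read_bias, (const float *) op_params + 1, sizeof(float));
    std::printf("max_bias read back: %f\n", read_bias); // 0.0 -> the CPU backend accepts the op
    return 0;
}
```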
@@ -5,95 +5,89 @@
 #include <cmath>
 #include <cstdint>
 
-static __global__ void cross_entropy_loss_f32(const float * logits, const float * labels, float * dst, const int nclasses, const int k) {
-    const int warp_id = threadIdx.x / WARP_SIZE;
-    const int lane_id = threadIdx.x % WARP_SIZE;
-    const int i0 = blockDim.x*blockIdx.x + warp_id*WARP_SIZE;
-
-    const int ne_tmp = WARP_SIZE*nclasses;
-
-    extern __shared__ float tmp_all[];
-    float * tmp_logits = tmp_all + (2*warp_id + 0)*ne_tmp;
-    float * tmp_labels = tmp_all + (2*warp_id + 1)*ne_tmp;
-
-    // Each warp first loads ne_tmp logits/labels into shared memory:
-    for (int i = lane_id; i < ne_tmp; i += WARP_SIZE) {
-        const int ig = i0*nclasses + i; // ig == i global
-
-        tmp_logits[i] = ig < k*nclasses ? logits[ig] : 0.0f;
-        tmp_labels[i] = ig < k*nclasses ? labels[ig] : 0.0f;
-    }
-
-    // Each thread in the warp then calculates the cross entropy loss for a single row.
-    // TODO: pad in order to avoid shared memory bank conflicts.
+template <bool use_shared>
+static __global__ void cross_entropy_loss_f32(
+        const float * __restrict__ logits, const float * __restrict__ labels, float * __restrict__ dst, const int nclasses, const int k) {
+    extern __shared__ float tmp[];
+
+    logits += int64_t(blockIdx.x)*nclasses;
+    labels += int64_t(blockIdx.x)*nclasses;
 
     // Find maximum for softmax:
-    float max = -INFINITY;
-    for (int i = 0; i < nclasses; ++i) {
-        max = fmaxf(max, tmp_logits[lane_id*nclasses + i]);
+    float max_logit = -INFINITY;
+    for (int i = threadIdx.x; i < nclasses; i += WARP_SIZE) {
+        const float val = logits[i];
+        max_logit = fmaxf(max_logit, val);
+
+        if (use_shared) {
+            tmp[i] = val;
+        }
     }
+    max_logit = warp_reduce_max(max_logit);
 
     // Calculate log(softmax(logits)) which is just logits - max:
     float sum = 0.0f;
-    for (int i = 0; i < nclasses; ++i) {
-        float val = tmp_logits[lane_id*nclasses + i] - max;
-        sum += expf(val);
-        tmp_logits[lane_id*nclasses + i] = val;
+    for (int i = threadIdx.x; i < nclasses; i += WARP_SIZE) {
+        const float logit_i = use_shared ? tmp[i] : logits[i];
+        sum += expf(logit_i - max_logit);
     }
+    sum = warp_reduce_sum(sum);
     sum = logf(sum);
 
     // log(exp(logits - max) / sum) = (logits - max) - log(sum)
     float loss = 0.0f;
-    for (int i = 0; i < nclasses; ++i) {
-        loss += (tmp_logits[lane_id*nclasses + i] - sum) * tmp_labels[lane_id*nclasses + i];
+    for (int i = threadIdx.x; i < nclasses; i += WARP_SIZE) {
+        const float logit_i = use_shared ? tmp[i] : logits[i];
+        loss += (logit_i - max_logit - sum) * labels[i];
     }
     loss = -warp_reduce_sum(loss) / (float)k;
 
-    __syncthreads();
-
-    if (lane_id == 0) {
-        tmp_all[warp_id] = loss;
-    }
-
-    __syncthreads();
-
-    if (warp_id != 0) {
-        return;
-    }
-
-    loss = lane_id < CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE/WARP_SIZE ? tmp_all[lane_id] : 0.0f;
-    loss = warp_reduce_sum(loss);
-
-    if (lane_id != 0) {
+    if (threadIdx.x != 0) {
         return;
     }
 
     dst[blockIdx.x] = loss;
 }
 
-static __global__ void cross_entropy_loss_back_f32(const float * logits, const float * labels, const float * loss, float * dst, const int nclasses) {
+template <bool use_shared>
+static __global__ void cross_entropy_loss_back_f32(
+        const float * __restrict__ grad, const float * __restrict__ logits, const float * __restrict__ labels,
+        float * __restrict__ dst, const int nclasses) {
     extern __shared__ float tmp[];
 
+    logits += int64_t(blockIdx.x)*nclasses;
+    labels += int64_t(blockIdx.x)*nclasses;
+    dst    += int64_t(blockIdx.x)*nclasses;
+
     float maxval = -INFINITY;
     for (int i = threadIdx.x; i < nclasses; i += WARP_SIZE) {
-        const float val = logits[blockIdx.x*nclasses + i];
+        const float val = logits[i];
         maxval = fmaxf(maxval, val);
-        tmp[i] = val;
+
+        if (use_shared) {
+            tmp[i] = val;
+        }
     }
     maxval = warp_reduce_max(maxval);
 
     float sum = 0.0f;
     for (int i = threadIdx.x; i < nclasses; i += WARP_SIZE) {
-        const float val = expf(tmp[i] - maxval);
+        const float val = expf((use_shared ? tmp[i] : logits[i]) - maxval);
         sum += val;
-        tmp[i] = val;
+
+        if (use_shared) {
+            tmp[i] = val;
+        } else {
+            dst[i] = val;
+        }
     }
     sum = warp_reduce_sum(sum);
     const float sm_scale = 1.0f/sum;
 
-    const float d_by_nrows = *loss/gridDim.x;
+    const float d_by_nrows = *grad/gridDim.x;
     for (int i = threadIdx.x; i < nclasses; i += WARP_SIZE) {
-        dst[blockIdx.x*nclasses + i] = (tmp[i]*sm_scale - labels[blockIdx.x*nclasses + i])*d_by_nrows;
+        const float val = use_shared ? tmp[i] : dst[i];
+        dst[i] = (val*sm_scale - labels[i])*d_by_nrows;
    }
 }
 
@@ -119,48 +113,77 @@ void ggml_cuda_cross_entropy_loss(ggml_backend_cuda_context & ctx, ggml_tensor *
     ggml_cuda_pool & pool = ctx.pool();
     cudaStream_t stream = ctx.stream();
 
-    const dim3 blocks_dim(CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE, 1, 1);
-    const dim3 blocks_num((nrows + CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE - 1) / CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE, 1, 1);
-    const int shmem = 2*CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE*ne00*sizeof(float);
+    const dim3 blocks_dim(WARP_SIZE, 1, 1);
+    const dim3 blocks_num(nrows, 1, 1);
+    const size_t nbytes_shared = ne00*sizeof(float);
+
+    const int id = ggml_cuda_get_device();
+    const size_t smpbo = ggml_cuda_info().devices[id].smpbo;
 
     ggml_cuda_pool_alloc<float> dst_tmp(pool, blocks_num.x);
 
-    cross_entropy_loss_f32<<<blocks_num, blocks_dim, shmem, stream>>>(src0_d, src1_d, dst_tmp.ptr, ne00, nrows);
+    if (nbytes_shared <= smpbo) {
+#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__))
+        static bool shared_memory_limit_raised[GGML_CUDA_MAX_DEVICES] = {false};
+        if (!shared_memory_limit_raised[id]) {
+            CUDA_CHECK(cudaFuncSetAttribute(cross_entropy_loss_back_f32<true>, cudaFuncAttributeMaxDynamicSharedMemorySize, smpbo));
+            shared_memory_limit_raised[id] = true;
+        }
+#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__))
+        cross_entropy_loss_f32<true><<<blocks_num, blocks_dim, nbytes_shared, stream>>>(src0_d, src1_d, dst_tmp.ptr, ne00, nrows);
+    } else {
+        cross_entropy_loss_f32<false><<<blocks_num, blocks_dim, 0, stream>>>(src0_d, src1_d, dst_tmp.ptr, ne00, nrows);
+    }
+    CUDA_CHECK(cudaGetLastError());
 
     // Combine results from individual blocks:
     sum_f32_cuda(pool, dst_tmp.ptr, dst_d, blocks_num.x, stream);
 }
 
 void ggml_cuda_cross_entropy_loss_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    const ggml_tensor * src0 = dst->src[0];
-    const ggml_tensor * src1 = dst->src[1];
-    const ggml_tensor * opt0 = dst->src[2];
+    const ggml_tensor * grad  = dst->src[0];
+    const ggml_tensor * src0f = dst->src[1];
+    const ggml_tensor * src1f = dst->src[2];
 
-    GGML_ASSERT(src0->type == GGML_TYPE_F32);
-    GGML_ASSERT(src1->type == GGML_TYPE_F32);
-    GGML_ASSERT(opt0->type == GGML_TYPE_F32);
+    GGML_ASSERT(src0f->type == GGML_TYPE_F32);
+    GGML_ASSERT(src1f->type == GGML_TYPE_F32);
+    GGML_ASSERT( grad->type == GGML_TYPE_F32);
     GGML_ASSERT( dst->type == GGML_TYPE_F32);
 
-    GGML_ASSERT(ggml_is_contiguous(src0));
-    GGML_ASSERT(ggml_is_contiguous(src1));
-    GGML_ASSERT(ggml_is_contiguous(opt0));
+    GGML_ASSERT(ggml_is_scalar(grad));
+    GGML_ASSERT(ggml_is_contiguous(src0f));
+    GGML_ASSERT(ggml_is_contiguous(src1f));
     GGML_ASSERT(ggml_is_contiguous(dst));
-    GGML_ASSERT(ggml_are_same_shape(src0, src1));
-    GGML_ASSERT(ggml_are_same_shape(src0, dst));
+    GGML_ASSERT(ggml_are_same_shape(src0f, src1f));
+    GGML_ASSERT(ggml_are_same_shape(src0f, dst));
 
-    const int64_t ne00 = src0->ne[0];
-    const int64_t nrows = ggml_nrows(src0);
+    const int64_t ne00 = src0f->ne[0];
+    const int64_t nrows = ggml_nrows(src0f);
 
-    const float * src0_d = (const float *) src0->data;
-    const float * src1_d = (const float *) src1->data;
-    const float * opt0_d = (const float *) opt0->data;
+    const float * grad_d = (const float *) grad->data;
+    const float * src0f_d = (const float *) src0f->data;
+    const float * src1f_d = (const float *) src1f->data;
     float * dst_d = (float *) dst->data;
 
     cudaStream_t stream = ctx.stream();
 
     const dim3 blocks_dim(WARP_SIZE, 1, 1);
     const dim3 blocks_num(nrows, 1, 1);
-    const int shmem = ne00*sizeof(float);
+    const size_t nbytes_shared = ne00*sizeof(float);
 
-    cross_entropy_loss_back_f32<<<blocks_num, blocks_dim, shmem, stream>>>(src0_d, src1_d, opt0_d, dst_d, ne00);
+    const int id = ggml_cuda_get_device();
+    const size_t smpbo = ggml_cuda_info().devices[id].smpbo;
+
+    if (nbytes_shared <= smpbo) {
+#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__))
+        static bool shared_memory_limit_raised[GGML_CUDA_MAX_DEVICES] = {false};
+        if (!shared_memory_limit_raised[id]) {
+            CUDA_CHECK(cudaFuncSetAttribute(cross_entropy_loss_back_f32<true>, cudaFuncAttributeMaxDynamicSharedMemorySize, smpbo));
+            shared_memory_limit_raised[id] = true;
+        }
+#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__))
+        cross_entropy_loss_back_f32<true><<<blocks_num, blocks_dim, nbytes_shared, stream>>>(grad_d, src0f_d, src1f_d, dst_d, ne00);
+    } else {
+        cross_entropy_loss_back_f32<false><<<blocks_num, blocks_dim, 0, stream>>>(grad_d, src0f_d, src1f_d, dst_d, ne00);
+    }
 }
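The host code above follows a common CUDA pattern: a kernel that wants more dynamic shared memory than the default limit must opt in once per device with cudaFuncSetAttribute, and a no-shared-memory variant is used as the fallback. A hedged, self-contained sketch of that pattern; the kernel, sizes, and device count are placeholders, not the ggml implementation:

```cpp
// CUDA C++ sketch: raise the dynamic shared-memory limit once per device, then
// dispatch between a shared-memory and a global-memory kernel variant.
#include <cuda_runtime.h>

constexpr int MAX_DEVICES = 16; // assumption for the sketch

template <bool use_shared>
__global__ void demo_kernel(const float * src, float * dst, int n) {
    extern __shared__ float tmp[];
    for (int i = threadIdx.x; i < n; i += blockDim.x) {
        if (use_shared) {
            tmp[i] = src[i];        // stage through shared memory when it fits
            dst[i] = 2.0f*tmp[i];
        } else {
            dst[i] = 2.0f*src[i];   // otherwise read straight from global memory
        }
    }
}

void launch_demo(const float * src, float * dst, int n, size_t smpbo, cudaStream_t stream) {
    int id = 0;
    cudaGetDevice(&id);
    const size_t nbytes_shared = n*sizeof(float);
    if (nbytes_shared <= smpbo) {
        // Requests beyond the default 48 KiB need a one-time per-device opt-in.
        static bool limit_raised[MAX_DEVICES] = {false};
        if (!limit_raised[id]) {
            cudaFuncSetAttribute(demo_kernel<true>, cudaFuncAttributeMaxDynamicSharedMemorySize, (int) smpbo);
            limit_raised[id] = true;
        }
        demo_kernel<true><<<1, 32, nbytes_shared, stream>>>(src, dst, n);
    } else {
        demo_kernel<false><<<1, 32, 0, stream>>>(src, dst, n);
    }
}
```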
--- a/ggml/src/ggml-cuda/getrows.cu
+++ b/ggml/src/ggml-cuda/getrows.cu
@@ -3,15 +3,15 @@

 template<int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
 static __global__ void k_get_rows(
-        const void * src0, const int32_t * src1, dst_t * dst,
-        int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/
-        /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/
-        /*size_t s0,*/ size_t s1, size_t s2, size_t s3,
-        /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03,
-        size_t s10, size_t s11, size_t s12/*, size_t s13*/) {
+        const void * __restrict__ src0, const int32_t * __restrict__ src1, dst_t * __restrict__ dst,
+        const int64_t ne00, /*const int64_t ne01, const int64_t ne02, const int64_t ne03,*/
+        /*const int64_t ne10, const int64_t ne11,*/ const int64_t ne12, /*const int64_t ne13,*/
+        /*const size_t s0,*/ const size_t s1, const size_t s2, const size_t s3,
+        /*const size_t nb00,*/ const size_t nb01, const size_t nb02, const size_t nb03,
+        const size_t s10, const size_t s11, const size_t s12/*, const size_t s13*/) {

     const int i00 = (blockIdx.x*blockDim.x + threadIdx.x)*2;
     const int i10 = blockDim.y*blockIdx.y + threadIdx.y;
     const int i11 = (blockIdx.z*blockDim.z + threadIdx.z)/ne12;
     const int i12 = (blockIdx.z*blockDim.z + threadIdx.z)%ne12;

@@ -22,10 +22,10 @@ static __global__ void k_get_rows(
     const int i01 = src1[i10*s10 + i11*s11 + i12*s12];

     dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3;
-    const void * src0_row = (const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03;
+    const void * src0_row = (const char *) src0 + i01*nb01 + i11*nb02 + i12*nb03;

     const int ib = i00/qk;       // block index
     const int iqs = (i00%qk)/qr; // quant index
     const int iybs = i00 - i00%qk; // dst block start index
     const int y_offset = qr == 1 ? 1 : qk/2;

@@ -39,15 +39,15 @@ static __global__ void k_get_rows(

 template<typename src0_t, typename dst_t>
 static __global__ void k_get_rows_float(
-        const src0_t * src0, const int32_t * src1, dst_t * dst,
-        int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/
-        /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/
-        /*size_t s0,*/ size_t s1, size_t s2, size_t s3,
-        /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03,
-        size_t s10, size_t s11, size_t s12/*, size_t s13*/) {
+        const src0_t * __restrict__ src0, const int32_t * __restrict__ src1, dst_t * __restrict__ dst,
+        const int64_t ne00, /*const int64_t ne01, const int64_t ne02, const int64_t ne03,*/
+        /*const int64_t ne10, const int64_t ne11,*/ const int64_t ne12, /*const int64_t ne13,*/
+        /*const size_t s0,*/ const size_t s1, const size_t s2, const size_t s3,
+        /*const size_t nb00,*/ const size_t nb01, const size_t nb02, const size_t nb03,
+        const size_t s10, const size_t s11, const size_t s12/*, const size_t s13*/) {

     const int i00 = blockIdx.x*blockDim.x + threadIdx.x;
     const int i10 = blockDim.y*blockIdx.y + threadIdx.y;
     const int i11 = (blockIdx.z*blockDim.z + threadIdx.z)/ne12;
     const int i12 = (blockIdx.z*blockDim.z + threadIdx.z)%ne12;

@@ -58,14 +58,38 @@ static __global__ void k_get_rows_float(
     const int i01 = src1[i10*s10 + i11*s11 + i12*s12];

     dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3;
-    const src0_t * src0_row = (const src0_t *)((const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03);
+    const src0_t * src0_row = (const src0_t *)((const char *) src0 + i01*nb01 + i11*nb02 + i12*nb03);

     dst_row[i00] = src0_row[i00];
 }

+template<typename grad_t, typename dst_t>
+static __global__ void k_get_rows_back_float(
+        const grad_t * __restrict__ grad, const int32_t * __restrict__ rows, dst_t * __restrict__ dst, const int64_t ncols, const int64_t nrows_grad) {
+    const int col = blockIdx.x*blockDim.x + threadIdx.x;
+
+    if (col >= ncols) {
+        return;
+    }
+
+    const int dst_row = blockIdx.y*blockDim.y + threadIdx.y;
+
+    float sum = 0.0f;
+
+    for (int64_t i = 0; i < nrows_grad; ++i) {
+        if (rows[i] != dst_row) {
+            continue;
+        }
+        sum += grad[i*ncols + col];
+    }
+
+    dst[dst_row*ncols + col] = sum;
+}
+
 template<int qk, int qr, dequantize_kernel_t dq>
-static void get_rows_cuda(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
-        const void * src0_dd, const int32_t * src1_dd, float * dst_dd, cudaStream_t stream) {
+static void get_rows_cuda(
+        const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+        const void * src0_dd, const int32_t * src1_dd, float * dst_dd, cudaStream_t stream) {

     GGML_TENSOR_BINARY_OP_LOCALS

@@ -87,22 +111,25 @@ static void get_rows_cuda(const ggml_tensor * src0, const ggml_tensor * src1, gg
     GGML_ASSERT(ne00 % 2 == 0);

     k_get_rows<qk, qr, dq><<<block_nums, block_dims, 0, stream>>>(
         src0_dd, src1_dd, dst_dd,
         ne00, /*ne01, ne02, ne03,*/
         /*ne10, ne11,*/ ne12, /*ne13,*/
         /* s0,*/ s1, s2, s3,
         /* nb00,*/ nb01, nb02, nb03,
         s10, s11, s12/*, s13*/);

     GGML_UNUSED(dst);
 }

 template<typename src0_t>
-static void get_rows_cuda_float(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
-        const src0_t * src0_dd, const int32_t * src1_dd, float * dst_dd, cudaStream_t stream) {
+static void get_rows_cuda_float(
+        const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+        const src0_t * src0_dd, const int32_t * src1_dd, float * dst_dd, cudaStream_t stream) {

     GGML_TENSOR_BINARY_OP_LOCALS

+    GGML_ASSERT(ne13 == 1);
+
     const dim3 block_dims(CUDA_GET_ROWS_BLOCK_SIZE, 1, 1);
     const int block_num_x = (ne00 + CUDA_GET_ROWS_BLOCK_SIZE - 1) / CUDA_GET_ROWS_BLOCK_SIZE;
     const dim3 block_nums(block_num_x, ne10, ne11*ne12);

@@ -119,12 +146,12 @@ static void get_rows_cuda_float(const ggml_tensor * src0, const ggml_tensor * sr
     //const size_t s13 = nb13 / ggml_element_size(src1);

     k_get_rows_float<<<block_nums, block_dims, 0, stream>>>(
         src0_dd, src1_dd, dst_dd,
         ne00, /*ne01, ne02, ne03,*/
         /*ne10, ne11,*/ ne12, /*ne13,*/
         /* s0,*/ s1, s2, s3,
         /* nb00,*/ nb01, nb02, nb03,
         s10, s11, s12/*, s13*/);

     GGML_UNUSED(dst);
 }

@@ -132,42 +159,41 @@
 void ggml_cuda_op_get_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
     const ggml_tensor * src1 = dst->src[1];
-    const float * src0_d = (const float *)src0->data;
-    const float * src1_d = (const float *)src1->data;
-    float * dst_d = (float *)dst->data;
+
+    const void * src0_d = (const void *) src0->data;
+    const int32_t * src1_d = (const int32_t *) src1->data;
+    float * dst_d = (float *) dst->data;

     cudaStream_t stream = ctx.stream();

     GGML_ASSERT(src1->type == GGML_TYPE_I32);
     GGML_ASSERT(dst->type == GGML_TYPE_F32);

     GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type));
     GGML_ASSERT(src1->nb[0] == ggml_type_size(src1->type));
     GGML_ASSERT(dst->nb[0] == ggml_type_size(dst->type));

-    const int32_t * src1_i32 = (const int32_t *) src1_d;
-
     switch (src0->type) {
         case GGML_TYPE_F16:
-            get_rows_cuda_float(src0, src1, dst, (const half *)src0_d, src1_i32, dst_d, stream);
+            get_rows_cuda_float(src0, src1, dst, (const half *) src0_d, src1_d, dst_d, stream);
             break;
         case GGML_TYPE_F32:
-            get_rows_cuda_float(src0, src1, dst, src0_d, src1_i32, dst_d, stream);
+            get_rows_cuda_float(src0, src1, dst, (const float *) src0_d, src1_d, dst_d, stream);
             break;
         case GGML_TYPE_Q4_0:
-            get_rows_cuda<QK4_0, QR4_0, dequantize_q4_0>(src0, src1, dst, src0_d, src1_i32, dst_d, stream);
+            get_rows_cuda<QK4_0, QR4_0, dequantize_q4_0>(src0, src1, dst, src0_d, src1_d, dst_d, stream);
             break;
         case GGML_TYPE_Q4_1:
-            get_rows_cuda<QK4_1, QR4_1, dequantize_q4_1>(src0, src1, dst, src0_d, src1_i32, dst_d, stream);
+            get_rows_cuda<QK4_1, QR4_1, dequantize_q4_1>(src0, src1, dst, src0_d, src1_d, dst_d, stream);
             break;
         case GGML_TYPE_Q5_0:
-            get_rows_cuda<QK5_0, QR5_0, dequantize_q5_0>(src0, src1, dst, src0_d, src1_i32, dst_d, stream);
+            get_rows_cuda<QK5_0, QR5_0, dequantize_q5_0>(src0, src1, dst, src0_d, src1_d, dst_d, stream);
             break;
         case GGML_TYPE_Q5_1:
-            get_rows_cuda<QK5_1, QR5_1, dequantize_q5_1>(src0, src1, dst, src0_d, src1_i32, dst_d, stream);
+            get_rows_cuda<QK5_1, QR5_1, dequantize_q5_1>(src0, src1, dst, src0_d, src1_d, dst_d, stream);
             break;
         case GGML_TYPE_Q8_0:
-            get_rows_cuda<QK8_0, QR8_0, dequantize_q8_0>(src0, src1, dst, src0_d, src1_i32, dst_d, stream);
+            get_rows_cuda<QK8_0, QR8_0, dequantize_q8_0>(src0, src1, dst, src0_d, src1_d, dst_d, stream);
             break;
         default:
             // TODO: k-quants
@@ -175,3 +201,34 @@ void ggml_cuda_op_get_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
             break;
     }
 }
+
+void ggml_cuda_op_get_rows_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0]; // gradients of forward pass output
+    const ggml_tensor * src1 = dst->src[1]; // src1 in forward pass
+
+    GGML_TENSOR_BINARY_OP_LOCALS
+
+    const float * src0_d = (const float *) src0->data;
+    const int32_t * src1_d = (const int32_t *) src1->data;
+    float * dst_d = (float *) dst->data;
+
+    cudaStream_t stream = ctx.stream();
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT(src1->type == GGML_TYPE_I32);
+    GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+    GGML_ASSERT(ggml_is_contiguous(src0));
+    GGML_ASSERT(ggml_is_contiguous(src1));
+    GGML_ASSERT(ggml_is_contiguous(dst));
+
+    GGML_ASSERT(ne02*ne03 == 1);
+    GGML_ASSERT(ne12*ne13 == 1);
+    GGML_ASSERT(ne2*ne3 == 1);
+
+    const dim3 block_dims(CUDA_GET_ROWS_BACK_BLOCK_SIZE, 1, 1);
+    const int block_num_x = (ne00 + CUDA_GET_ROWS_BACK_BLOCK_SIZE - 1) / CUDA_GET_ROWS_BACK_BLOCK_SIZE;
+    const dim3 block_nums(block_num_x, ne1, 1);
+
+    k_get_rows_back_float<<<block_nums, block_dims, 0, stream>>>(src0_d, src1_d, dst_d, ne00, ne10);
+}
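The new GET_ROWS_BACK path above is the adjoint of the row gather: every row of the incoming gradient is accumulated into the destination row named by the index that selected it in the forward pass. The kernel avoids atomics by giving one thread to each (destination row, column) pair and scanning all gradient rows. A minimal CPU reference with the same semantics, as a sketch for illustration only (`get_rows_back_ref` is not part of ggml):

```cpp
#include <cstdint>

// dst has nrows_dst rows of ncols floats, grad has nrows_grad rows of ncols
// floats, rows[i] names the dst row that the i-th gradient row belongs to.
static void get_rows_back_ref(const float * grad, const int32_t * rows, float * dst,
                              int64_t ncols, int64_t nrows_grad, int64_t nrows_dst) {
    // the adjoint of a gather is a scatter-add: zero the output, then accumulate
    for (int64_t r = 0; r < nrows_dst; ++r) {
        for (int64_t c = 0; c < ncols; ++c) {
            dst[r*ncols + c] = 0.0f;
        }
    }
    for (int64_t i = 0; i < nrows_grad; ++i) {
        const int64_t r = rows[i];
        for (int64_t c = 0; c < ncols; ++c) {
            dst[r*ncols + c] += grad[i*ncols + c];
        }
    }
}
```

The CUDA kernel reaches the same result deterministically, at the cost of reading all `nrows_grad` indices once per output element.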
--- a/ggml/src/ggml-cuda/getrows.cuh
+++ b/ggml/src/ggml-cuda/getrows.cuh
@@ -1,5 +1,8 @@
 #include "common.cuh"

 #define CUDA_GET_ROWS_BLOCK_SIZE 256
+#define CUDA_GET_ROWS_BACK_BLOCK_SIZE 256

 void ggml_cuda_op_get_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+
+void ggml_cuda_op_get_rows_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -2008,6 +2008,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
         case GGML_OP_GET_ROWS:
             ggml_cuda_op_get_rows(ctx, dst);
             break;
+        case GGML_OP_GET_ROWS_BACK:
+            ggml_cuda_op_get_rows_back(ctx, dst);
+            break;
         case GGML_OP_DUP:
             ggml_cuda_dup(ctx, dst);
             break;
@@ -2096,9 +2099,15 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
         case GGML_OP_LEAKY_RELU:
             ggml_cuda_op_leaky_relu(ctx, dst);
             break;
+        case GGML_OP_SILU_BACK:
+            ggml_cuda_op_silu_back(ctx, dst);
+            break;
         case GGML_OP_RMS_NORM:
             ggml_cuda_op_rms_norm(ctx, dst);
             break;
+        case GGML_OP_RMS_NORM_BACK:
+            ggml_cuda_op_rms_norm_back(ctx, dst);
+            break;
         case GGML_OP_MUL_MAT:
             if (dst->src[0]->ne[3] != dst->src[1]->ne[3]) {
                 GGML_LOG_ERROR("%s: cannot compute %s: src0->ne[3] = %" PRId64 ", src1->ne[3] = %" PRId64 " - fallback to CPU\n", __func__, dst->name, dst->src[0]->ne[3], dst->src[1]->ne[3]);
@@ -2143,6 +2152,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
         case GGML_OP_SOFT_MAX:
             ggml_cuda_op_soft_max(ctx, dst);
             break;
+        case GGML_OP_SOFT_MAX_BACK:
+            ggml_cuda_op_soft_max_back(ctx, dst);
+            break;
         case GGML_OP_ROPE:
             ggml_cuda_op_rope(ctx, dst);
             break;
@@ -2917,7 +2929,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
                 }
             } break;
         case GGML_OP_OUT_PROD:
-            return op->type == GGML_TYPE_F32 && op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32 && op->ne[2] == 1 && op->ne[3] == 1;
+            return op->type == GGML_TYPE_F32 && op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32;
         case GGML_OP_GET_ROWS:
             {
                 switch (op->src[0]->type) {
@@ -2933,6 +2945,10 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
                         return false;
                 }
             } break;
+        case GGML_OP_GET_ROWS_BACK:
+            {
+                return op->type == GGML_TYPE_F32 && op->src[0]->type == GGML_TYPE_F32 && op->ne[2] == 1 && op->ne[3] == 1;
+            } break;
         case GGML_OP_CPY:
             {
                 ggml_type src0_type = op->src[0]->type;
@@ -3006,8 +3022,12 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
                 }
                 return false;
             } break;
+        case GGML_OP_SILU_BACK:
+            return ggml_is_contiguous(op->src[0]);
+            break;
         case GGML_OP_NORM:
         case GGML_OP_RMS_NORM:
+        case GGML_OP_RMS_NORM_BACK:
             return ggml_is_contiguous(op->src[0]) && op->ne[0] % WARP_SIZE == 0;
             break;
         case GGML_OP_NONE:
@@ -3032,6 +3052,11 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
         case GGML_OP_DIAG_MASK_INF:
         case GGML_OP_SOFT_MAX:
             return true;
+        case GGML_OP_SOFT_MAX_BACK: {
+            float max_bias = 0.0f;
+            memcpy(&max_bias, (const float *) op->op_params + 1, sizeof(float));
+            return max_bias == 0.0f;
+        }
         case GGML_OP_ROPE:
         case GGML_OP_ROPE_BACK: {
             const size_t ts = ggml_type_size(op->src[0]->type);
--- a/ggml/src/ggml-cuda/norm.cu
+++ b/ggml/src/ggml-cuda/norm.cu
@@ -5,20 +5,24 @@ static __global__ void norm_f32(const float * x, float * dst, const int ncols, c
     const int row = blockIdx.x*blockDim.y + threadIdx.y;
     const int tid = threadIdx.x;

-    float2 mean_var = make_float2(0.f, 0.f);
+    x   += int64_t(row)*ncols;
+    dst += int64_t(row)*ncols;
+
+    float2 mean_var = make_float2(0.0f, 0.0f);

     for (int col = tid; col < ncols; col += block_size) {
-        const float xi = x[row*ncols + col];
+        const float xi = x[col];
         mean_var.x += xi;
         mean_var.y += xi * xi;
     }

     // sum up partial sums
     mean_var = warp_reduce_sum(mean_var);
-    if (block_size > WARP_SIZE) {
+    if constexpr (block_size > WARP_SIZE) {
+        static_assert(block_size == 1024, "unexpected block_size");
         __shared__ float2 s_sum[32];
-        int warp_id = threadIdx.x / WARP_SIZE;
-        int lane_id = threadIdx.x % WARP_SIZE;
+        const int warp_id = threadIdx.x / WARP_SIZE;
+        const int lane_id = threadIdx.x % WARP_SIZE;
         if (lane_id == 0) {
             s_sum[warp_id] = mean_var;
         }
@@ -32,7 +36,7 @@ static __global__ void norm_f32(const float * x, float * dst, const int ncols, c
     const float inv_std = rsqrtf(var + eps);

     for (int col = tid; col < ncols; col += block_size) {
-        dst[row*ncols + col] = (x[row*ncols + col] - mean) * inv_std;
+        dst[col] = (x[col] - mean) * inv_std;
     }
 }

@@ -40,14 +44,8 @@ template <int block_size>
 static __global__ void group_norm_f32(const float * x, float * dst, const int group_size, const int ne_elements, const float eps) {
     // blockIdx.x: num_groups idx
     // threadIdx.x: block_size idx
-    int start = blockIdx.x * group_size;
-    int end = start + group_size;
-
-    start += threadIdx.x;
-
-    if (end >= ne_elements) {
-        end = ne_elements;
-    }
+    const int start = blockIdx.x*group_size + threadIdx.x;
+    const int end   = min(blockIdx.x*group_size + group_size, ne_elements);

     float tmp = 0.0f; // partial sum for thread in warp

@@ -56,10 +54,11 @@ static __global__ void group_norm_f32(const float * x, float * dst, const int gr
     }

     tmp = warp_reduce_sum(tmp);
-    if (block_size > WARP_SIZE) {
+    if constexpr (block_size > WARP_SIZE) {
+        static_assert(block_size == 1024, "unexpected block_size");
         __shared__ float s_sum[32];
-        int warp_id = threadIdx.x / WARP_SIZE;
-        int lane_id = threadIdx.x % WARP_SIZE;
+        const int warp_id = threadIdx.x / WARP_SIZE;
+        const int lane_id = threadIdx.x % WARP_SIZE;
         if (lane_id == 0) {
             s_sum[warp_id] = tmp;
         }
@@ -68,11 +67,11 @@ static __global__ void group_norm_f32(const float * x, float * dst, const int gr
         tmp = warp_reduce_sum(tmp);
     }

-    float mean = tmp / group_size;
+    const float mean = tmp / group_size;
     tmp = 0.0f;

     for (int j = start; j < end; j += block_size) {
-        float xi = x[j] - mean;
+        const float xi = x[j] - mean;
         dst[j] = xi;
         tmp += xi * xi;
     }
@@ -80,8 +79,8 @@ static __global__ void group_norm_f32(const float * x, float * dst, const int gr
     tmp = warp_reduce_sum(tmp);
     if (block_size > WARP_SIZE) {
         __shared__ float s_sum[32];
-        int warp_id = threadIdx.x / WARP_SIZE;
-        int lane_id = threadIdx.x % WARP_SIZE;
+        const int warp_id = threadIdx.x / WARP_SIZE;
+        const int lane_id = threadIdx.x % WARP_SIZE;
         if (lane_id == 0) {
             s_sum[warp_id] = tmp;
         }
@@ -90,8 +89,8 @@ static __global__ void group_norm_f32(const float * x, float * dst, const int gr
         tmp = warp_reduce_sum(tmp);
     }

-    float variance = tmp / group_size;
-    float scale = rsqrtf(variance + eps);
+    const float variance = tmp / group_size;
+    const float scale = rsqrtf(variance + eps);
     for (int j = start; j < end; j += block_size) {
         dst[j] *= scale;
     }
@@ -102,19 +101,23 @@ static __global__ void rms_norm_f32(const float * x, float * dst, const int ncol
     const int row = blockIdx.x*blockDim.y + threadIdx.y;
     const int tid = threadIdx.x;

+    x   += int64_t(row)*ncols;
+    dst += int64_t(row)*ncols;
+
     float tmp = 0.0f; // partial sum for thread in warp

     for (int col = tid; col < ncols; col += block_size) {
-        const float xi = x[row*ncols + col];
+        const float xi = x[col];
         tmp += xi * xi;
     }

     // sum up partial sums
     tmp = warp_reduce_sum(tmp);
-    if (block_size > WARP_SIZE) {
+    if constexpr (block_size > WARP_SIZE) {
+        static_assert(block_size == 1024, "unexpected block_size");
         __shared__ float s_sum[32];
-        int warp_id = threadIdx.x / WARP_SIZE;
-        int lane_id = threadIdx.x % WARP_SIZE;
+        const int warp_id = threadIdx.x / WARP_SIZE;
+        const int lane_id = threadIdx.x % WARP_SIZE;
         if (lane_id == 0) {
             s_sum[warp_id] = tmp;
         }
@@ -127,12 +130,63 @@ static __global__ void rms_norm_f32(const float * x, float * dst, const int ncol
     const float scale = rsqrtf(mean + eps);

     for (int col = tid; col < ncols; col += block_size) {
-        dst[row*ncols + col] = scale * x[row*ncols + col];
+        dst[col] = scale * x[col];
+    }
+}
+
+template <int block_size>
+static __global__ void rms_norm_back_f32(
+        const float * grad, const float * xf, float * dst, const int ncols, const float eps) {
+    const int row = blockIdx.x*blockDim.y + threadIdx.y;
+    const int tid = threadIdx.x;
+
+    grad += int64_t(row)*ncols;
+    xf   += int64_t(row)*ncols;
+    dst  += int64_t(row)*ncols;
+
+    float sum_xx = 0.0f; // sum for squares of x, equivalent to forward pass
+    float sum_xg = 0.0f; // sum for x * gradient, needed because RMS norm mixes inputs
+
+    for (int col = tid; col < ncols; col += block_size) {
+        const float xfi = xf[col];
+        sum_xx += xfi * xfi;
+        sum_xg += xfi * grad[col];
+    }
+
+    // sum up partial sums
+    sum_xx = warp_reduce_sum(sum_xx);
+    sum_xg = warp_reduce_sum(sum_xg);
+    if constexpr (block_size > WARP_SIZE) {
+        static_assert(block_size == 1024, "unexpected block_size");
+        __shared__ float s_sum_xx[32];
+        __shared__ float s_sum_xg[32];
+        const int warp_id = threadIdx.x / WARP_SIZE;
+        const int lane_id = threadIdx.x % WARP_SIZE;
+        if (lane_id == 0) {
+            s_sum_xx[warp_id] = sum_xx;
+            s_sum_xg[warp_id] = sum_xg;
+        }
+        __syncthreads();
+
+        sum_xx = s_sum_xx[lane_id];
+        sum_xx = warp_reduce_sum(sum_xx);
+
+        sum_xg = s_sum_xg[lane_id];
+        sum_xg = warp_reduce_sum(sum_xg);
+    }
+
+    const float mean_eps = sum_xx / ncols + eps;
+    const float sum_eps  = sum_xx + ncols*eps;
+
+    const float scale_grad = rsqrtf(mean_eps);
+    const float scale_x    = -scale_grad * sum_xg/sum_eps;
+
+    for (int col = tid; col < ncols; col += block_size) {
+        dst[col] = scale_grad*grad[col] + scale_x*xf[col];
     }
 }

 static void norm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float eps, cudaStream_t stream) {
-    GGML_ASSERT(ncols % WARP_SIZE == 0);
     if (ncols < 1024) {
         const dim3 block_dims(WARP_SIZE, 1, 1);
         norm_f32<WARP_SIZE><<<nrows, block_dims, 0, stream>>>(x, dst, ncols, eps);
@@ -142,7 +196,8 @@ static void norm_f32_cuda(const float * x, float * dst, const int ncols, const i
     }
 }

-static void group_norm_f32_cuda(const float * x, float * dst, const int num_groups, const float eps, const int group_size, const int ne_elements, cudaStream_t stream) {
+static void group_norm_f32_cuda(
+        const float * x, float * dst, const int num_groups, const float eps, const int group_size, const int ne_elements, cudaStream_t stream) {
     if (group_size < 1024) {
         const dim3 block_dims(WARP_SIZE, 1, 1);
         group_norm_f32<WARP_SIZE><<<num_groups, block_dims, 0, stream>>>(x, dst, group_size, ne_elements, eps);
@@ -153,7 +208,6 @@ static void group_norm_f32_cuda(const float * x, float * dst, const int num_grou
 }

 static void rms_norm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float eps, cudaStream_t stream) {
-    GGML_ASSERT(ncols % WARP_SIZE == 0);
     if (ncols < 1024) {
         const dim3 block_dims(WARP_SIZE, 1, 1);
         rms_norm_f32<WARP_SIZE><<<nrows, block_dims, 0, stream>>>(x, dst, ncols, eps);
@@ -163,6 +217,16 @@ static void rms_norm_f32_cuda(const float * x, float * dst, const int ncols, con
     }
 }

+static void rms_norm_back_f32_cuda(const float * grad, const float * xf, float * dst, const int ncols, const int nrows, const float eps, cudaStream_t stream) {
+    if (ncols < 1024) {
+        const dim3 block_dims(WARP_SIZE, 1, 1);
+        rms_norm_back_f32<WARP_SIZE><<<nrows, block_dims, 0, stream>>>(grad, xf, dst, ncols, eps);
+    } else {
+        const dim3 block_dims(1024, 1, 1);
+        rms_norm_back_f32<1024><<<nrows, block_dims, 0, stream>>>(grad, xf, dst, ncols, eps);
+    }
+}
+
 void ggml_cuda_op_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
     const float * src0_d = (const float *)src0->data;
@@ -179,6 +243,7 @@ void ggml_cuda_op_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {

     float eps;
     memcpy(&eps, dst->op_params, sizeof(float));
+    GGML_ASSERT(eps >= 0.0f);

     norm_f32_cuda(src0_d, dst_d, ne00, nrows, eps, stream);
 }
@@ -198,6 +263,7 @@ void ggml_cuda_op_group_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst)

     float eps;
     memcpy(&eps, dst->op_params + 1, sizeof(float));
+    GGML_ASSERT(eps >= 0.0f);

     int group_size = src0->ne[0] * src0->ne[1] * ((src0->ne[2] + num_groups - 1) / num_groups);
     group_norm_f32_cuda(src0_d, dst_d, num_groups * src0->ne[3], eps, group_size, ggml_nelements(src0), stream);
@@ -219,6 +285,33 @@ void ggml_cuda_op_rms_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {

     float eps;
     memcpy(&eps, dst->op_params, sizeof(float));
+    GGML_ASSERT(eps >= 0.0f);

     rms_norm_f32_cuda(src0_d, dst_d, ne00, nrows, eps, stream);
 }
+
+void ggml_cuda_op_rms_norm_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    const ggml_tensor * grad  = dst->src[0]; // gradients
+    const ggml_tensor * src0f = dst->src[1]; // src0 from forward pass
+
+    const float * grad_d  = (const float *) grad->data;
+    const float * src0f_d = (const float *) src0f->data;
+    float * dst_d = (float *) dst->data;
+
+    cudaStream_t stream = ctx.stream();
+
+    GGML_ASSERT(ggml_is_contiguous(grad));
+
+    GGML_ASSERT( grad->type == GGML_TYPE_F32);
+    GGML_ASSERT(src0f->type == GGML_TYPE_F32);
+    GGML_ASSERT(  dst->type == GGML_TYPE_F32);
+
+    const int64_t ne00  = src0f->ne[0];
+    const int64_t nrows = ggml_nrows(src0f);
+
+    float eps;
+    memcpy(&eps, dst->op_params, sizeof(float));
+    GGML_ASSERT(eps >= 0.0f);
+
+    rms_norm_back_f32_cuda(grad_d, src0f_d, dst_d, ne00, nrows, eps, stream);
+}
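For reference, the backward formula that `rms_norm_back_f32` evaluates can be read off from its two reductions. With row input $x$ of length $n$, incoming gradient $g$, and the kernel's `mean_eps` written as $m = \tfrac{1}{n}\sum_j x_j^2 + \varepsilon$, the forward pass is $y_i = x_i/\sqrt{m}$ and the gradient of the loss with respect to $x_i$ is

```latex
\frac{\partial L}{\partial x_i}
  = \frac{g_i}{\sqrt{m}}
  - \frac{x_i \sum_j g_j x_j}{n\, m^{3/2}},
\qquad
m = \frac{1}{n}\sum_j x_j^2 + \varepsilon .
```

In the kernel, `scale_grad` is $1/\sqrt{m}$ and `scale_x` is $-\sum_j g_j x_j / (n\,m^{3/2})$, since `sum_eps` $= \sum_j x_j^2 + n\varepsilon = n\,m$; the cross term `sum_xg` is what makes this a full Jacobian-vector product rather than an elementwise scaling.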
--- a/ggml/src/ggml-cuda/norm.cuh
+++ b/ggml/src/ggml-cuda/norm.cuh
@@ -5,3 +5,5 @@ void ggml_cuda_op_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
 void ggml_cuda_op_group_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

 void ggml_cuda_op_rms_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+
+void ggml_cuda_op_rms_norm_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
--- a/ggml/src/ggml-cuda/out-prod.cu
+++ b/ggml/src/ggml-cuda/out-prod.cu
@@ -11,16 +11,15 @@ void ggml_cuda_out_prod(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     GGML_ASSERT(src0->type == GGML_TYPE_F32);
     GGML_ASSERT(src1->type == GGML_TYPE_F32);
     GGML_ASSERT(dst->type == GGML_TYPE_F32);
-    GGML_ASSERT(ggml_is_contiguous(src0));
-    GGML_ASSERT(ggml_is_contiguous(dst));

     GGML_ASSERT(ne01 == ne11);
     GGML_ASSERT(ne0 == ne00);
     GGML_ASSERT(ne1 == ne10);

-    GGML_ASSERT(ne2 == src0->ne[2]);
+    GGML_ASSERT(ne2 % src0->ne[2] == 0);
+    GGML_ASSERT(ne3 % src0->ne[3] == 0);

     GGML_ASSERT(ne2 == src1->ne[2]);
-    GGML_ASSERT(ne3 == src0->ne[3]);
     GGML_ASSERT(ne3 == src1->ne[3]);

     const float * src0_d = (const float *) src0->data;
@@ -33,8 +32,6 @@ void ggml_cuda_out_prod(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const float alpha = 1.0f;
     const float beta = 0.0f;

-    GGML_ASSERT(ne2 == 1);
-    GGML_ASSERT(ne3 == 1);
     CUBLAS_CHECK(cublasSetStream(handle, stream));

     const bool src1_T = ggml_is_transposed(src1);
@@ -42,10 +39,27 @@ void ggml_cuda_out_prod(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const int64_t ldb = (src1_T ? nb10 : nb11) / sizeof(float);
     GGML_ASSERT( (src1_T ? nb11 : nb10) == sizeof(float));

-    CUBLAS_CHECK(
-        cublasSgemm(handle, CUBLAS_OP_N, src1_cublas_op,
-                ne0, ne1, ne01,
-                &alpha, src0_d, ne00,
-                        src1_d, ldb,
-                &beta,  dst_d,  ne0));
+    // data strides in dimensions 2/3
+    const size_t s02 = nb02 / sizeof(float);
+    const size_t s03 = nb03 / sizeof(float);
+    const size_t s12 = nb12 / sizeof(float);
+    const size_t s13 = nb13 / sizeof(float);
+    const size_t s2  = nb2  / sizeof(float);
+    const size_t s3  = nb3  / sizeof(float);
+
+    // dps == dst per src0, used for group query attention
+    const int64_t dps2 = ne2 / ne02;
+    const int64_t dps3 = ne3 / ne03;
+
+    // TODO batched matrix multiplication
+    for (int64_t i3 = 0; i3 < ne3; ++i3) {
+        for (int64_t i2 = 0; i2 < ne2; ++i2) {
+            CUBLAS_CHECK(
+                cublasSgemm(handle, CUBLAS_OP_N, src1_cublas_op,
+                        ne0, ne1, ne01,
+                        &alpha, src0_d + (i3/dps3)*s03 + (i2/dps2)*s02, ne00,
+                                src1_d + i3 *s13 + i2 *s12, ldb,
+                        &beta,  dst_d  + i3 *s3  + i2 *s2,  ne0));
+        }
+    }
 }
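The loop above replaces the single `cublasSgemm` with one GEMM per slice in dimensions 2 and 3, and the relaxed asserts let `dst` carry a multiple of the `src0` slices. Per the in-diff comment this broadcast serves grouped-query attention: several output slices share one `src0` slice, selected by integer division,

```latex
\text{src0 slice for output slice } (i_2, i_3)
  = \left( \left\lfloor i_2 / \mathrm{dps2} \right\rfloor,\;
           \left\lfloor i_3 / \mathrm{dps3} \right\rfloor \right),
\qquad \mathrm{dps2} = ne_2 / ne_{02},\; \mathrm{dps3} = ne_3 / ne_{03}.
```

With hypothetical shapes $ne_{02} = 8$ and $ne_2 = 32$ (not taken from the diff), $\mathrm{dps2} = 4$, so output slices $0..3$ all multiply against `src0` slice $0$, slices $4..7$ against slice $1$, and so on, while each output slice keeps its own `src1` slice.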
--- a/ggml/src/ggml-cuda/rope.cu
+++ b/ggml/src/ggml-cuda/rope.cu
@@ -39,9 +39,9 @@ static __device__ void rope_yarn(

 template<bool forward, bool has_ff, typename T>
 static __global__ void rope_norm(
-        const T * __restrict__ x, T * __restrict__ dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims,
-        const int32_t * __restrict__ pos, const float freq_scale, const float ext_factor, const float attn_factor,
-        const rope_corr_dims corr_dims, const float theta_scale, const float * __restrict__ freq_factors) {
+        const T * x, T * dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims,
+        const int32_t * pos, const float freq_scale, const float ext_factor, const float attn_factor,
+        const rope_corr_dims corr_dims, const float theta_scale, const float * freq_factors) {
     const int i0 = 2*(blockDim.y*blockIdx.y + threadIdx.y);

     if (i0 >= ne0) {
@@ -83,9 +83,9 @@ static __global__ void rope_norm(

 template<bool forward, bool has_ff, typename T>
 static __global__ void rope_neox(
-        const T * __restrict__ x, T * __restrict__ dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims,
-        const int32_t * __restrict__ pos, const float freq_scale, const float ext_factor, const float attn_factor,
-        const rope_corr_dims corr_dims, const float theta_scale, const float * __restrict__ freq_factors) {
+        const T * x, T * dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims,
+        const int32_t * pos, const float freq_scale, const float ext_factor, const float attn_factor,
+        const rope_corr_dims corr_dims, const float theta_scale, const float * freq_factors) {
     const int i0 = 2*(blockDim.y*blockIdx.y + threadIdx.y);

     if (i0 >= ne0) {
@@ -127,9 +127,9 @@ static __global__ void rope_neox(

 template<bool forward, bool has_ff, typename T>
 static __global__ void rope_multi(
-        const T * __restrict__ x, T * __restrict__ dst, const int ne0, const int ne1, const int ne2, const int s1, const int s2,
-        const int n_dims, const int32_t * __restrict__ pos, const float freq_scale, const float ext_factor, const float attn_factor,
-        const rope_corr_dims corr_dims, const float theta_scale, const float * __restrict__ freq_factors, const mrope_sections sections) {
+        const T * x, T * dst, const int ne0, const int ne1, const int ne2, const int s1, const int s2,
+        const int n_dims, const int32_t * pos, const float freq_scale, const float ext_factor, const float attn_factor,
+        const rope_corr_dims corr_dims, const float theta_scale, const float * freq_factors, const mrope_sections sections) {
     const int i0 = 2*(blockDim.y*blockIdx.y + threadIdx.y);

     if (i0 >= ne0) {
@@ -187,9 +187,9 @@ static __global__ void rope_multi(

 template<bool forward, bool has_ff, typename T>
 static __global__ void rope_vision(
-        const T * __restrict__ x, T * __restrict__ dst, const int ne0, const int ne1, const int ne2, const int s1, const int s2, const int n_dims,
-        const int32_t * __restrict__ pos, const float freq_scale, const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims,
-        const float theta_scale, const float * __restrict__ freq_factors, const mrope_sections sections) {
+        const T * x, T * dst, const int ne0, const int ne1, const int ne2, const int s1, const int s2, const int n_dims,
+        const int32_t * pos, const float freq_scale, const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims,
+        const float theta_scale, const float * freq_factors, const mrope_sections sections) {
     const int i0 = 2*(blockDim.y*blockIdx.y + threadIdx.y);

     if (i0 >= ne0) {
@@ -234,9 +234,9 @@ static __global__ void rope_vision(

 template<bool forward, typename T>
 static void rope_norm_cuda(
-        const T * __restrict__ x, T * __restrict__ dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims, const int nr,
-        const int32_t * __restrict__ pos, const float freq_scale, const float freq_base, const float ext_factor, const float attn_factor,
-        const rope_corr_dims corr_dims, const float * __restrict__ freq_factors, cudaStream_t stream) {
+        const T * x, T * dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims, const int nr,
+        const int32_t * pos, const float freq_scale, const float freq_base, const float ext_factor, const float attn_factor,
+        const rope_corr_dims corr_dims, const float * freq_factors, cudaStream_t stream) {
     GGML_ASSERT(ne0 % 2 == 0);
     const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1);
     const int n_blocks_x = (ne0 + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
@@ -257,9 +257,9 @@ static void rope_norm_cuda(

 template<bool forward, typename T>
 static void rope_neox_cuda(
-        const T * __restrict__ x, T * __restrict__ dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims, const int nr,
-        const int32_t * __restrict__ pos, const float freq_scale, const float freq_base, const float ext_factor, const float attn_factor,
-        const rope_corr_dims corr_dims, const float * __restrict__ freq_factors, cudaStream_t stream) {
+        const T * x, T * dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims, const int nr,
+        const int32_t * pos, const float freq_scale, const float freq_base, const float ext_factor, const float attn_factor,
+        const rope_corr_dims corr_dims, const float * freq_factors, cudaStream_t stream) {
     GGML_ASSERT(ne0 % 2 == 0);
     const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1);
     const int n_blocks_x = (ne0 + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
@@ -280,9 +280,9 @@ static void rope_neox_cuda(

 template<bool forward, typename T>
 static void rope_multi_cuda(
-        const T * __restrict__ x, T * __restrict__ dst, const int ne0, const int ne1, const int ne2, const int s1, const int s2, const int n_dims, const int nr,
-        const int32_t * __restrict__ pos, const float freq_scale, const float freq_base, const float ext_factor, const float attn_factor,
-        const rope_corr_dims corr_dims, const float * __restrict__ freq_factors, const mrope_sections sections, cudaStream_t stream) {
+        const T * x, T * dst, const int ne0, const int ne1, const int ne2, const int s1, const int s2, const int n_dims, const int nr,
+        const int32_t * pos, const float freq_scale, const float freq_base, const float ext_factor, const float attn_factor,
+        const rope_corr_dims corr_dims, const float * freq_factors, const mrope_sections sections, cudaStream_t stream) {
     GGML_ASSERT(ne0 % 2 == 0);
     const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1);
     const int n_blocks_x = (ne0 + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
@@ -303,9 +303,9 @@ static void rope_multi_cuda(

 template<bool forward, typename T>
 static void rope_vision_cuda(
-        const T * __restrict__ x, T * __restrict__ dst, const int ne0, const int ne1, const int ne2, const int s1, const int s2, const int n_dims, const int nr,
-        const int32_t * __restrict__ pos, const float freq_scale, const float freq_base, const float ext_factor, const float attn_factor,
-        const rope_corr_dims corr_dims, const float * __restrict__ freq_factors, const mrope_sections sections, cudaStream_t stream) {
+        const T * x, T * dst, const int ne0, const int ne1, const int ne2, const int s1, const int s2, const int n_dims, const int nr,
+        const int32_t * pos, const float freq_scale, const float freq_base, const float ext_factor, const float attn_factor,
+        const rope_corr_dims corr_dims, const float * freq_factors, const mrope_sections sections, cudaStream_t stream) {
     GGML_ASSERT(ne0 % 2 == 0);
     const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1);
     const int n_blocks_x = (ne0 + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
--- a/ggml/src/ggml-cuda/softmax.cu
+++ b/ggml/src/ggml-cuda/softmax.cu
@@ -1,5 +1,7 @@
 #include "common.cuh"
+#include "ggml.h"
 #include "softmax.cuh"
+#include <cstdint>

 template <typename T>
 static __device__ __forceinline__ float t2f32(T val) {
@@ -11,14 +13,20 @@ __device__ float __forceinline__ t2f32<half>(half val) {
     return __half2float(val);
 }

-template <bool vals_smem, int ncols_template, int block_size_template, typename T>
-static __global__ void soft_max_f32(const float * x, const T * mask, float * dst, const int ncols_par, const int nrows_y, const float scale, const float max_bias, const float m0, const float m1, uint32_t n_head_log2) {
+template <bool use_shared, int ncols_template, int block_size_template, typename T>
+static __global__ void soft_max_f32(
+        const float * x, const T * mask, float * dst, const int ncols_par, const int nrows_y,
+        const float scale, const float max_bias, const float m0, const float m1, uint32_t n_head_log2) {
     const int ncols = ncols_template == 0 ? ncols_par : ncols_template;

     const int tid  = threadIdx.x;
     const int rowx = blockIdx.x;
     const int rowy = rowx % nrows_y; // broadcast the mask in the row dimension

+    x    += int64_t(rowx)*ncols;
+    mask += int64_t(rowy)*ncols * (mask != nullptr);
+    dst  += int64_t(rowx)*ncols;
+
     const int block_size = block_size_template == 0 ? blockDim.x : block_size_template;

     const int warp_id = threadIdx.x / WARP_SIZE;
@@ -29,7 +37,7 @@ static __global__ void soft_max_f32(const float * x, const T * mask, float * dst
     extern __shared__ float data_soft_max_f32[];
     float * buf_iw = data_soft_max_f32; // shared memory buffer for inter-warp communication
     // shared memory buffer to cache values between iterations:
-    float * vals = vals_smem ? buf_iw + WARP_SIZE : dst + (int64_t)rowx*ncols;
+    float * vals = use_shared ? buf_iw + WARP_SIZE : dst;

     float max_val = -INFINITY;

@@ -41,10 +49,7 @@ static __global__ void soft_max_f32(const float * x, const T * mask, float * dst
             break;
         }

-        const int64_t ix = (int64_t)rowx*ncols + col;
-        const int64_t iy = (int64_t)rowy*ncols + col;
-
-        const float val = x[ix]*scale + (mask ? slope*t2f32(mask[iy]) : 0.0f);
+        const float val = x[col]*scale + (mask ? slope*t2f32(mask[col]) : 0.0f);

         vals[col] = val;
         max_val = max(max_val, val);
@@ -110,8 +115,29 @@ static __global__ void soft_max_f32(const float * x, const T * mask, float * dst
             return;
         }

-        const int64_t idst = (int64_t)rowx*ncols + col;
-        dst[idst] = vals[col] * inv_sum;
+        dst[col] = vals[col] * inv_sum;
+    }
+}
+
+static __global__ void soft_max_back_f32(
+        const float * grad, const float * dstf, float * dst, const int ncols, const float scale) {
+    const int tid  = threadIdx.x;
+    const int rowx = blockIdx.x;
+
+    grad += int64_t(rowx)*ncols;
+    dstf += int64_t(rowx)*ncols;
+    dst  += int64_t(rowx)*ncols;
+
+    float dgf_dot = 0.0f; // dot product of dst from forward pass and gradients
+
+    for (int col = tid; col < ncols; col += WARP_SIZE) {
+        dgf_dot += dstf[col]*grad[col];
+    }
+
+    dgf_dot = warp_reduce_sum(dgf_dot);
+
+    for (int col = tid; col < ncols; col += WARP_SIZE) {
+        dst[col] = scale * (grad[col] - dgf_dot) * dstf[col];
     }
 }
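For reference, `soft_max_back_f32` computes the standard softmax Jacobian–vector product with the forward scale folded in. Writing $y$ for the forward output (passed in as `dstf`), $g$ for the incoming gradient and $s$ for the `scale` that was applied to the logits in the forward pass,

```latex
\frac{\partial L}{\partial x_i}
  = s\, y_i \left( g_i - \sum_j g_j\, y_j \right),
```

which is exactly `scale * (grad[col] - dgf_dot) * dstf[col]`, with `dgf_dot` holding the warp-reduced dot product $\sum_j g_j y_j$ for the row.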
@@ -121,7 +147,7 @@ static void soft_max_f32_cuda(const float * x, const T * mask, float * dst, cons
     while (nth < ncols_x && nth < CUDA_SOFT_MAX_BLOCK_SIZE) nth *= 2;
     const dim3 block_dims(nth, 1, 1);
     const dim3 block_nums(nrows_x, 1, 1);
-    const size_t shmem = (GGML_PAD(ncols_x, WARP_SIZE) + WARP_SIZE)*sizeof(float);
+    const size_t nbytes_shared = (GGML_PAD(ncols_x, WARP_SIZE) + WARP_SIZE)*sizeof(float);
     static_assert(CUDA_SOFT_MAX_BLOCK_SIZE == 1024, "These values need to be adjusted.");

     const uint32_t n_head = nrows_x/nrows_y;
@@ -131,50 +157,68 @@ static void soft_max_f32_cuda(const float * x, const T * mask, float * dst, cons
     const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);

     // FIXME: this limit could be raised by ~2-4x on Ampere or newer
-    if (shmem < ggml_cuda_info().devices[ggml_cuda_get_device()].smpb) {
+    if (nbytes_shared < ggml_cuda_info().devices[ggml_cuda_get_device()].smpb) {
         switch (ncols_x) {
             case 32:
-                soft_max_f32<true, 32, 32><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 32, 32><<<block_nums, block_dims, nbytes_shared, stream>>>
+                    (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 64:
-                soft_max_f32<true, 64, 64><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 64, 64><<<block_nums, block_dims, nbytes_shared, stream>>>
+                    (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 128:
-                soft_max_f32<true, 128, 128><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 128, 128><<<block_nums, block_dims, nbytes_shared, stream>>>
+                    (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 256:
-                soft_max_f32<true, 256, 256><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 256, 256><<<block_nums, block_dims, nbytes_shared, stream>>>
+                    (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 512:
-                soft_max_f32<true, 512, 512><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 512, 512><<<block_nums, block_dims, nbytes_shared, stream>>>
+                    (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 1024:
-                soft_max_f32<true, 1024, 1024><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 1024, 1024><<<block_nums, block_dims, nbytes_shared, stream>>>
+                    (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 2048:
-                soft_max_f32<true, 2048, 1024><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 2048, 1024><<<block_nums, block_dims, nbytes_shared, stream>>>
+                    (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 4096:
-                soft_max_f32<true, 4096, 1024><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 4096, 1024><<<block_nums, block_dims, nbytes_shared, stream>>>
+                    (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             default:
-                soft_max_f32<true, 0, 0><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 0, 0><<<block_nums, block_dims, nbytes_shared, stream>>>
+                    (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
         }
     } else {
-        const size_t shmem_low = WARP_SIZE*sizeof(float);
-        soft_max_f32<false, 0, 0><<<block_nums, block_dims, shmem_low, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+        const size_t nbytes_shared_low = WARP_SIZE*sizeof(float);
+        soft_max_f32<false, 0, 0><<<block_nums, block_dims, nbytes_shared_low, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
     }
 }

+static void soft_max_back_f32_cuda(
+        const float * grad, const float * dstf, float * dst,
+        const int ncols, const int nrows, const float scale, cudaStream_t stream) {
+    const dim3 block_dims(WARP_SIZE, 1, 1);
+    const dim3 block_nums(nrows, 1, 1);
+
+    soft_max_back_f32<<<block_nums, block_dims, 0, stream>>>(grad, dstf, dst, ncols, scale);
+}
+
 void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
     const ggml_tensor * src1 = dst->src[1];

-    const float * src0_d = (const float *)src0->data;
-    const void * src1_d = src1 ? (const void *)src1->data : nullptr;
-
-    float * dst_d = (float *)dst->data;
+    const float * src0_d = (const float *) src0->data;
+    const void * src1_d = src1 ? (const void *) src1->data : nullptr;
+    float * dst_d = (float *) dst->data;

     cudaStream_t stream = ctx.stream();

     GGML_ASSERT(src0->type == GGML_TYPE_F32);
@@ -189,18 +233,42 @@ void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     float scale    = 1.0f;
     float max_bias = 0.0f;

-    memcpy(&scale,    (float *) dst->op_params + 0, sizeof(float));
-    memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float));
+    memcpy(&scale,    (const float *) dst->op_params + 0, sizeof(float));
+    memcpy(&max_bias, (const float *) dst->op_params + 1, sizeof(float));

     const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16);

     if (use_f16) {
-        const half * src1_dd = (const half *)src1_d;
-
-        soft_max_f32_cuda(src0_d, src1_dd, dst_d, ne00, nrows_x, nrows_y, scale, max_bias, stream);
+        soft_max_f32_cuda(src0_d, (const half *) src1_d, dst_d, ne00, nrows_x, nrows_y, scale, max_bias, stream);
     } else {
-        const float * src1_dd = (const float *)src1_d;
-
-        soft_max_f32_cuda(src0_d, src1_dd, dst_d, ne00, nrows_x, nrows_y, scale, max_bias, stream);
+        soft_max_f32_cuda(src0_d, (const float *) src1_d, dst_d, ne00, nrows_x, nrows_y, scale, max_bias, stream);
     }
 }
+
+void ggml_cuda_op_soft_max_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0]; // grad
+    const ggml_tensor * src1 = dst->src[1]; // forward pass output
+
+    const float * src0_d = (const float *) src0->data;
+    const float * src1_d = (const float *) src1->data;
+    float * dst_d = (float *) dst->data;
+
+    cudaStream_t stream = ctx.stream();
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT(src1->type == GGML_TYPE_F32);
+    GGML_ASSERT( dst->type == GGML_TYPE_F32);
+
+    const int64_t ncols = src0->ne[0];
|
||||||
|
const int64_t nrows = ggml_nrows(src0);
|
||||||
|
|
||||||
|
float scale = 1.0f;
|
||||||
|
float max_bias = 0.0f;
|
||||||
|
|
||||||
|
memcpy(&scale, (const float *) dst->op_params + 0, sizeof(float));
|
||||||
|
memcpy(&max_bias, (const float *) dst->op_params + 1, sizeof(float));
|
||||||
|
|
||||||
|
GGML_ASSERT(max_bias == 0.0f);
|
||||||
|
|
||||||
|
soft_max_back_f32_cuda(src0_d, src1_d, dst_d, ncols, nrows, scale, stream);
|
||||||
|
}
|
||||||
|
|
|
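Note on the new backward launcher: soft_max_back_f32_cuda dispatches one warp-sized block per row to apply the softmax Jacobian-vector product. The kernel body is not shown in this hunk, so purely as a reference, here is a minimal CPU sketch of the quantity it has to compute, with the same scale semantics; the function and variable names are illustrative, not taken from the repository.

    #include <cstddef>

    // Reference softmax backward for one row: given upstream gradients `grad`
    // and the forward outputs `y` (the softmax values), compute
    //   dx[i] = scale * y[i] * (grad[i] - sum_j grad[j]*y[j]).
    // Sketch only; the CUDA kernel in this commit parallelizes this per row.
    static void soft_max_back_row_ref(const float * grad, const float * y, float * dx,
                                      std::size_t ncols, float scale) {
        float dot = 0.0f;
        for (std::size_t j = 0; j < ncols; ++j) {
            dot += grad[j] * y[j];
        }
        for (std::size_t i = 0; i < ncols; ++i) {
            dx[i] = scale * y[i] * (grad[i] - dot);
        }
    }

The scale factor appears because the forward op applies softmax to scale*x, so the chain rule multiplies each row's gradient by scale; max_bias is asserted to be zero in the backward path above.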
@@ -3,3 +3,5 @@
 #define CUDA_SOFT_MAX_BLOCK_SIZE 1024

 void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+
+void ggml_cuda_op_soft_max_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
@@ -51,6 +51,19 @@ static __global__ void silu_f32(const float * x, float * dst, const int k) {
     dst[i] = x[i] / (1.0f + expf(-x[i]));
 }

+static __global__ void silu_back_f32(
+        const float * grad, const float * xf, float * dst, const int k) {
+    const int i = blockDim.x*blockIdx.x + threadIdx.x;
+
+    if (i >= k) {
+        return;
+    }
+
+    const float xfi = xf[i];
+    const float s = 1.0f / (1.0f + expf(-xfi));
+    dst[i] = grad[i] * s * (1.0f + xfi * (1.0f - s));
+}
+
 static __global__ void tanh_f32(const float * x, float * dst, int k) {
     const int i = blockDim.x*blockIdx.x + threadIdx.x;
     if (i >= k) {

@@ -173,6 +186,11 @@ static void silu_f32_cuda(const float * x, float * dst, const int k, cudaStream_
     silu_f32<<<num_blocks, CUDA_SILU_BLOCK_SIZE, 0, stream>>>(x, dst, k);
 }

+static void silu_back_f32_cuda(const float * grad, const float * x, float * dst, const int k, cudaStream_t stream) {
+    const int num_blocks = (k + CUDA_SILU_BACK_BLOCK_SIZE - 1) / CUDA_SILU_BLOCK_SIZE;
+    silu_back_f32<<<num_blocks, CUDA_SILU_BACK_BLOCK_SIZE, 0, stream>>>(grad, x, dst, k);
+}
+
 static void tanh_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) {
     const int num_blocks = (k + CUDA_TANH_BLOCK_SIZE - 1) / CUDA_TANH_BLOCK_SIZE;
     tanh_f32<<<num_blocks, CUDA_TANH_BLOCK_SIZE, 0, stream>>>(x, dst, k);

@@ -284,6 +302,24 @@ void ggml_cuda_op_silu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     silu_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream);
 }

+void ggml_cuda_op_silu_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0]; // input from forward pass
+    const ggml_tensor * src1 = dst->src[1]; // grads of forward pass output
+
+    const float * src0_d = (const float *) src0->data;
+    const float * src1_d = (const float *) src1->data;
+    float * dst_d = (float *) dst->data;
+
+    cudaStream_t stream = ctx.stream();
+
+    GGML_ASSERT(ggml_is_contiguous(src0));
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT( dst->type == GGML_TYPE_F32);
+
+    silu_back_f32_cuda(src0_d, src1_d, dst_d, ggml_nelements(src0), stream);
+}
+
 void ggml_cuda_op_gelu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
     const float * src0_d = (const float *)src0->data;
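The added silu_back_f32 kernel is a direct application of the SiLU derivative: with silu(x) = x*sigmoid(x), d/dx silu(x) = sigmoid(x) * (1 + x*(1 - sigmoid(x))). A small host-side sketch of the same formula, useful for checking the kernel element by element; the names here are illustrative:

    #include <cmath>

    // d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x))),
    // so the backward pass multiplies the incoming gradient by that factor.
    static float silu_backward_ref(float grad, float x) {
        const float s = 1.0f / (1.0f + std::exp(-x));
        return grad * s * (1.0f + x * (1.0f - s));
    }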
@@ -4,6 +4,7 @@
 #define CUDA_STEP_BLOCK_SIZE 256
 #define CUDA_GELU_BLOCK_SIZE 256
 #define CUDA_SILU_BLOCK_SIZE 256
+#define CUDA_SILU_BACK_BLOCK_SIZE 256
 #define CUDA_TANH_BLOCK_SIZE 256
 #define CUDA_RELU_BLOCK_SIZE 256
 #define CUDA_SIGMOID_BLOCK_SIZE 256

@@ -23,6 +24,8 @@ void ggml_cuda_op_gelu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
 void ggml_cuda_op_silu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

+void ggml_cuda_op_silu_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+
 void ggml_cuda_op_gelu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

 void ggml_cuda_op_tanh(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
(File diff suppressed because it is too large.)
@@ -1794,6 +1794,42 @@ const uint64_t contig_cpy_f32_f16_len = 3280;
 extern unsigned char contig_cpy_f16_f16_data[3184];
 const uint64_t contig_cpy_f16_f16_len = 3184;

+extern unsigned char cpy_f32_q4_0_data[15164];
+const uint64_t cpy_f32_q4_0_len = 15164;
+
+extern unsigned char cpy_q4_0_f32_data[10616];
+const uint64_t cpy_q4_0_f32_len = 10616;
+
+extern unsigned char cpy_f32_q4_1_data[15832];
+const uint64_t cpy_f32_q4_1_len = 15832;
+
+extern unsigned char cpy_q4_1_f32_data[10708];
+const uint64_t cpy_q4_1_f32_len = 10708;
+
+extern unsigned char cpy_f32_q5_0_data[18660];
+const uint64_t cpy_f32_q5_0_len = 18660;
+
+extern unsigned char cpy_q5_0_f32_data[14080];
+const uint64_t cpy_q5_0_f32_len = 14080;
+
+extern unsigned char cpy_f32_q5_1_data[17616];
+const uint64_t cpy_f32_q5_1_len = 17616;
+
+extern unsigned char cpy_q5_1_f32_data[14000];
+const uint64_t cpy_q5_1_f32_len = 14000;
+
+extern unsigned char cpy_f32_q8_0_data[12460];
+const uint64_t cpy_f32_q8_0_len = 12460;
+
+extern unsigned char cpy_q8_0_f32_data[11636];
+const uint64_t cpy_q8_0_f32_len = 11636;
+
+extern unsigned char cpy_f32_iq4_nl_data[13684];
+const uint64_t cpy_f32_iq4_nl_len = 13684;
+
+extern unsigned char cpy_iq4_nl_f32_data[11880];
+const uint64_t cpy_iq4_nl_f32_len = 11880;
+
 extern unsigned char add_f32_data[5916];
 const uint64_t add_f32_len = 5916;
@@ -232,6 +232,8 @@ struct vk_device_struct {
     vk_pipeline pipeline_repeat_f32;
     vk_pipeline pipeline_cpy_f32_f32, pipeline_cpy_f32_f16, pipeline_cpy_f16_f16;
     vk_pipeline pipeline_contig_cpy_f32_f32, pipeline_contig_cpy_f32_f16, pipeline_contig_cpy_f16_f16;
+    vk_pipeline pipeline_cpy_f32_quant[GGML_TYPE_COUNT];
+    vk_pipeline pipeline_cpy_quant_f32[GGML_TYPE_COUNT];
     vk_pipeline pipeline_norm_f32;
     vk_pipeline pipeline_group_norm_f32;
     vk_pipeline pipeline_rms_norm_f32;

@@ -1969,6 +1971,20 @@ static void ggml_vk_load_shaders(vk_device& device) {
     ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f32_f16, "contig_cpy_f32_f16", contig_cpy_f32_f16_len, contig_cpy_f32_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f16_f16, "contig_cpy_f16_f16", contig_cpy_f16_f16_len, contig_cpy_f16_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);

+    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q4_0], "cpy_f32_q4_0", cpy_f32_q4_0_len, cpy_f32_q4_0_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q4_0), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q4_1], "cpy_f32_q4_1", cpy_f32_q4_1_len, cpy_f32_q4_1_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q4_1), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q5_0], "cpy_f32_q5_0", cpy_f32_q5_0_len, cpy_f32_q5_0_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q5_0), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q5_1], "cpy_f32_q5_1", cpy_f32_q5_1_len, cpy_f32_q5_1_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q5_1), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q8_0], "cpy_f32_q8_0", cpy_f32_q8_0_len, cpy_f32_q8_0_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q8_0), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_IQ4_NL], "cpy_f32_iq4_nl", cpy_f32_iq4_nl_len, cpy_f32_iq4_nl_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_IQ4_NL), 1, 1}, {}, 1);
+
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q4_0], "cpy_q4_0_f32", cpy_q4_0_f32_len, cpy_q4_0_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q4_0), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q4_1], "cpy_q4_1_f32", cpy_q4_1_f32_len, cpy_q4_1_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q4_1), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q5_0], "cpy_q5_0_f32", cpy_q5_0_f32_len, cpy_q5_0_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q5_0), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q5_1], "cpy_q5_1_f32", cpy_q5_1_f32_len, cpy_q5_1_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q5_1), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q8_0], "cpy_q8_0_f32", cpy_q8_0_f32_len, cpy_q8_0_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q8_0), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_IQ4_NL], "cpy_iq4_nl_f32", cpy_iq4_nl_f32_len, cpy_iq4_nl_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_IQ4_NL), 1, 1}, {}, 1);
+
     ggml_vk_create_pipeline(device, device->pipeline_add_f32, "add_f32", add_f32_len, add_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {0}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_add_f32_norepeat, "add_f32_norepeat", add_f32_len, add_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {1}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_add_f16_f32_f16, "add_f16_f32_f16", add_f16_f32_f16_len, add_f16_f32_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {0}, 1);

@@ -3697,6 +3713,33 @@ static vk_pipeline ggml_vk_get_cpy_pipeline(ggml_backend_vk_context * ctx, const
             return ctx->device->pipeline_cpy_f16_f16;
         }
     }
+    if (src->type == GGML_TYPE_F32) {
+        switch (to) {
+        case GGML_TYPE_Q4_0:
+        case GGML_TYPE_Q4_1:
+        case GGML_TYPE_Q5_0:
+        case GGML_TYPE_Q5_1:
+        case GGML_TYPE_Q8_0:
+        case GGML_TYPE_IQ4_NL:
+            return ctx->device->pipeline_cpy_f32_quant[to];
+        default:
+            break;
+        }
+    }
+
+    if (to == GGML_TYPE_F32) {
+        switch (src->type) {
+        case GGML_TYPE_Q4_0:
+        case GGML_TYPE_Q4_1:
+        case GGML_TYPE_Q5_0:
+        case GGML_TYPE_Q5_1:
+        case GGML_TYPE_Q8_0:
+        case GGML_TYPE_IQ4_NL:
+            return ctx->device->pipeline_cpy_quant_f32[src->type];
+        default:
+            break;
+        }
+    }

     std::cerr << "Missing CPY op for types: " << ggml_type_name(src->type) << " " << ggml_type_name(to) << std::endl;
     GGML_ABORT("fatal error");

@@ -5168,7 +5211,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
     }
     std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
     std::cerr << "), " << ggml_op_name(op) << ", " << (dryrun ? "dryrun" : "") << ")");
-    GGML_ASSERT(op == GGML_OP_GET_ROWS || (!ggml_is_quantized(src0->type) && (src1 == nullptr || !ggml_is_quantized(src1->type)))); // NOLINT
+    GGML_ASSERT(op == GGML_OP_GET_ROWS || op == GGML_OP_CPY || (!ggml_is_quantized(src0->type) && (src1 == nullptr || !ggml_is_quantized(src1->type)))); // NOLINT
     GGML_ASSERT(ggml_vk_op_supports_incontiguous(op) || ggml_vk_dim01_contiguous(src0)); // NOLINT
     GGML_ASSERT(dst->buffer != nullptr);
     const uint64_t ne00 = src0->ne[0];

@@ -7913,12 +7956,36 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
         {
             ggml_type src0_type = op->src[0]->type;
             ggml_type src1_type = op->src[1] != nullptr ? op->src[1]->type : src0_type;
-            if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
-                return true;
+            if (src0_type == GGML_TYPE_F32) {
+                switch (src1_type) {
+                case GGML_TYPE_F32:
+                case GGML_TYPE_F16:
+                case GGML_TYPE_Q4_0:
+                case GGML_TYPE_Q4_1:
+                case GGML_TYPE_Q5_0:
+                case GGML_TYPE_Q5_1:
+                case GGML_TYPE_Q8_0:
+                case GGML_TYPE_IQ4_NL:
+                    return true;
+                default:
+                    break;
+                }
             }
-            if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) {
-                return true;
+            if (src1_type == GGML_TYPE_F32) {
+                switch (src0_type) {
+                case GGML_TYPE_Q4_0:
+                case GGML_TYPE_Q4_1:
+                case GGML_TYPE_Q5_0:
+                case GGML_TYPE_Q5_1:
+                case GGML_TYPE_Q8_0:
+                case GGML_TYPE_IQ4_NL:
+                    return true;
+                default:
+                    break;
+                }
             }
+
             if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
                 return true;
             }
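The Vulkan CPY changes follow a table-driven pattern: one pipeline per quantized type, stored in arrays indexed by ggml_type (pipeline_cpy_f32_quant / pipeline_cpy_quant_f32), with a switch acting as the supported-type whitelist. A hedged sketch of that pattern in isolation; the enum, handle type and struct here are placeholders, not the backend's real declarations:

    #include <array>
    #include <cstddef>
    #include <cstdint>

    enum ExampleType : std::size_t { F32, F16, Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, IQ4_NL, TYPE_COUNT };
    using PipelineHandle = std::uint64_t; // stand-in for the backend's vk_pipeline

    struct CopyPipelines {
        std::array<PipelineHandle, TYPE_COUNT> f32_to_quant{}; // filled at shader-load time
        std::array<PipelineHandle, TYPE_COUNT> quant_to_f32{};

        // Returns 0 when the conversion is not supported, mirroring the
        // "switch over supported types, default: break" guard in the diff.
        PipelineHandle get_f32_to(ExampleType dst) const {
            switch (dst) {
                case Q4_0: case Q4_1: case Q5_0: case Q5_1: case Q8_0: case IQ4_NL:
                    return f32_to_quant[dst];
                default:
                    return 0;
            }
        }
    };

Keeping the arrays sized to the full type count trades a little memory for a branch-free index once the type has passed the whitelist.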
ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp (new file, 51 lines)
@@ -0,0 +1,51 @@
+#version 450
+
+#include "types.comp"
+#include "generic_unary_head.comp"
+#include "dequant_funcs.comp"
+
+#if defined(DATA_A_IQ4_NL)
+// 16 invocations needed for init_iq4nl_shmem
+layout(local_size_x = 16, local_size_y = 1, local_size_z = 1) in;
+#else
+layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+#endif
+
+void main() {
+#if defined(DATA_A_IQ4_NL)
+    init_iq4nl_shmem();
+    if (gl_LocalInvocationIndex.x != 0) {
+        return;
+    }
+#endif
+
+    const uint idx = gl_WorkGroupID.z * 262144 + gl_WorkGroupID.y * 512 + gl_WorkGroupID.x * QUANT_K;
+
+    if (idx >= p.ne) {
+        return;
+    }
+
+    uint dst_idx = get_doffset() + dst_idx(idx);
+    uint src_idx = src0_idx_quant(idx, QUANT_K);
+
+    const uint a_offset = 0;
+    const uint ib = src_idx;
+    const vec2 dm = get_dm(ib, a_offset);
+
+    [[unroll]] for (int j = 0; j < QUANT_K; j += 4) {
+        vec4 v = dequantize4(ib, j / QUANT_R, a_offset);
+        v = v * dm.x + vec4(dm.y);
+
+#if QUANT_R == 2
+        data_d[dst_idx + j/2 + 0] = v[0];
+        data_d[dst_idx + j/2 + QUANT_K/2 + 0] = v[1];
+        data_d[dst_idx + j/2 + 1] = v[2];
+        data_d[dst_idx + j/2 + QUANT_K/2 + 1] = v[3];
+#else
+        data_d[dst_idx + j + 0] = v[0];
+        data_d[dst_idx + j + 1] = v[1];
+        data_d[dst_idx + j + 2] = v[2];
+        data_d[dst_idx + j + 3] = v[3];
+#endif
+    }
+}
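copy_from_quant.comp leans on the shared dequantize4/get_dm helpers, so per block the work reduces to the affine map value = code * d + m, with low nibbles feeding the first half of the block and high nibbles the second half when QUANT_R == 2. A minimal CPU sketch of that arithmetic for a simplified Q4_1-style block; the struct and field names are hypothetical:

    #include <cstdint>

    // Hypothetical 32-element block with a 4-bit code per value,
    // a scale d and a minimum m (Q4_1-style layout, simplified).
    struct ExampleBlockQ4_1 {
        float d;             // scale
        float m;             // minimum
        std::uint8_t qs[16]; // two 4-bit codes per byte
    };

    // Dequantize one block into 32 floats: low nibbles fill the first half,
    // high nibbles the second half, matching the QUANT_R == 2 store pattern.
    static void dequantize_block(const ExampleBlockQ4_1 & b, float * out) {
        for (int j = 0; j < 16; ++j) {
            const int lo = b.qs[j] & 0x0F;
            const int hi = b.qs[j] >> 4;
            out[j]      = lo * b.d + b.m;
            out[j + 16] = hi * b.d + b.m;
        }
    }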
ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp (new file, 237 lines)
@@ -0,0 +1,237 @@
+#version 450
+
+#include "types.comp"
+#include "generic_unary_head.comp"
+
+#if defined(DATA_A_IQ4_NL)
+// 16 invocations needed for init_iq4nl_shmem
+layout(local_size_x = 16, local_size_y = 1, local_size_z = 1) in;
+#else
+layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+#endif
+
+layout (binding = 0) readonly buffer S {float data_s[];};
+layout (binding = 1) writeonly buffer Q {A_TYPE data_q[];};
+
+#if defined(DATA_A_Q4_0)
+void quantize(uint dst_idx, uint src_idx)
+{
+    float amax = 0.0;
+    float vmax = 0.0;
+
+    [[unroll]] for (int j = 0; j < QUANT_K_Q4_0; ++j) {
+        const float v = data_s[src_idx + j];
+        if (amax < abs(v)) {
+            amax = abs(v);
+            vmax = v;
+        }
+    }
+
+    const float d = vmax / -8;
+    const float id = (d != 0.0) ? 1.0/d : 0.0;
+
+    data_q[dst_idx].d = float16_t(d);
+
+    [[unroll]] for (int j = 0; j < QUANT_K_Q4_0/2; ++j) {
+        const float x0 = data_s[src_idx + 0 + j]*id;
+        const float x1 = data_s[src_idx + QUANT_K_Q4_0/2 + j]*id;
+
+        const uint xi0 = min(15, int(x0 + 8.5));
+        const uint xi1 = min(15, int(x1 + 8.5));
+
+        data_q[dst_idx].qs[j] = uint8_t(xi0 | (xi1 << 4));
+    }
+}
+#endif
+
+#if defined(DATA_A_Q4_1)
+void quantize(uint dst_idx, uint src_idx)
+{
+    float vmin = 1.0/0.0;
+    float vmax = -vmin;
+
+    [[unroll]] for (int j = 0; j < QUANT_K_Q4_1; ++j) {
+        const float v = data_s[src_idx + j];
+
+        if (v < vmin) vmin = v;
+        if (v > vmax) vmax = v;
+    }
+
+    const float d = (vmax - vmin) / ((1 << 4) - 1);
+    const float id = (d != 0.0) ? 1.0/d : 0.0;
+
+    data_q[dst_idx].d = float16_t(d);
+    data_q[dst_idx].m = float16_t(vmin);
+
+    [[unroll]] for (int j = 0; j < QUANT_K_Q4_1/2; ++j) {
+        const float x0 = (data_s[src_idx + 0 + j] - vmin)*id;
+        const float x1 = (data_s[src_idx + QUANT_K_Q4_1/2 + j] - vmin)*id;
+
+        const uint xi0 = min(15, int(x0 + 0.5));
+        const uint xi1 = min(15, int(x1 + 0.5));
+
+        data_q[dst_idx].qs[j] = uint8_t(xi0 | (xi1 << 4));
+    }
+}
+#endif
+
+#if defined(DATA_A_Q5_0)
+void quantize(uint dst_idx, uint src_idx)
+{
+    float amax = 0.0;
+    float vmax = 0.0;
+
+    [[unroll]] for (int j = 0; j < QUANT_K_Q5_0; ++j) {
+        const float v = data_s[src_idx + j];
+        if (amax < abs(v)) {
+            amax = abs(v);
+            vmax = v;
+        }
+    }
+
+    const float d = vmax / -16;
+    const float id = (d != 0.0) ? 1.0/d : 0.0;
+
+    data_q[dst_idx].d = float16_t(d);
+
+    uint32_t qh = 0;
+    [[unroll]] for (int j = 0; j < QUANT_K_Q5_0/2; ++j) {
+        const float x0 = data_s[src_idx + 0 + j]*id;
+        const float x1 = data_s[src_idx + QUANT_K_Q5_0/2 + j]*id;
+
+        const uint xi0 = min(31, int(x0 + 16.5));
+        const uint xi1 = min(31, int(x1 + 16.5));
+
+        data_q[dst_idx].qs[j] = uint8_t((xi0 & 0xf) | ((xi1 & 0xf) << 4));
+        qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
+        qh |= ((xi1 & 0x10u) >> 4) << (j + QUANT_K_Q5_0/2);
+    }
+    data_q[dst_idx].qh[0] = uint16_t(qh & 0xFFFF);
+    data_q[dst_idx].qh[1] = uint16_t(qh >> 16);
+}
+#endif
+
+#if defined(DATA_A_Q5_1)
+void quantize(uint dst_idx, uint src_idx)
+{
+    float min = data_s[src_idx + 0];
+    float max = min;
+
+    [[unroll]] for (int j = 1; j < QUANT_K_Q5_1; ++j) {
+        const float v = data_s[src_idx + j];
+        min = v < min ? v : min;
+        max = v > max ? v : max;
+    }
+
+    const float d = (max - min) / 31;
+    const float id = (d != 0) ? 1.0/d : 0.0;
+
+    data_q[dst_idx].d = float16_t(d);
+    data_q[dst_idx].m = float16_t(min);
+
+    uint32_t qh = 0;
+    [[unroll]] for (int j = 0; j < QUANT_K_Q5_1/2; ++j) {
+        const float x0 = (data_s[src_idx + 0 + j] - min)*id;
+        const float x1 = (data_s[src_idx + QUANT_K_Q5_1/2 + j] - min)*id;
+
+        const uint xi0 = uint(x0 + 0.5);
+        const uint xi1 = uint(x1 + 0.5);
+
+        data_q[dst_idx].qs[j] = uint8_t((xi0 & 0xf) | ((xi1 & 0xf) << 4));
+        qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
+        qh |= ((xi1 & 0x10u) >> 4) << (j + QUANT_K_Q5_1/2);
+    }
+    data_q[dst_idx].qh = qh;
+}
+#endif
+
+#if defined(DATA_A_Q8_0)
+void quantize(uint dst_idx, uint src_idx)
+{
+    float amax = 0.0; // absolute max
+
+    [[unroll]] for (int j = 0; j < QUANT_K_Q8_0; j++) {
+        const float v = data_s[src_idx + j];
+        amax = max(amax, abs(v));
+    }
+
+    const float d = amax / ((1 << 7) - 1);
+    const float id = (d != 0.0) ? 1.0/d : 0.0;
+
+    data_q[dst_idx].d = float16_t(d);
+
+    [[unroll]] for (int j = 0; j < QUANT_K_Q8_0; ++j) {
+        const float x0 = data_s[src_idx + j]*id;
+
+        data_q[dst_idx].qs[j] = int8_t(round(x0));
+    }
+}
+#endif
+
+#if defined(DATA_A_IQ4_NL)
+uint best_index(float x) {
+    if (x <= kvalues_iq4nl[0]) return 0;
+    if (x >= kvalues_iq4nl[15]) return 15;
+    int ml = 0, mu = 15;
+    while (mu-ml > 1) {
+        int mav = (ml+mu)/2;
+        if (x < kvalues_iq4nl[mav]) mu = mav; else ml = mav;
+    }
+    return x - kvalues_iq4nl[mu-1] < kvalues_iq4nl[mu] - x ? mu-1 : mu;
+}
+
+void quantize(uint dst_idx, uint src_idx)
+{
+    float amax = 0.0;
+    float vmax = 0.0;
+
+    [[unroll]] for (int j = 0; j < QUANT_K_IQ4_NL; ++j) {
+        const float v = data_s[src_idx + j];
+        if (amax < abs(v)) {
+            amax = abs(v);
+            vmax = v;
+        }
+    }
+
+    float d = vmax / kvalues_iq4nl[0];
+    const float id = (d != 0.0) ? 1.0/d : 0.0;
+
+    float sumqx = 0, sumq2 = 0;
+    [[unroll]] for (int j = 0; j < QUANT_K_IQ4_NL/2; ++j) {
+        const float x0 = data_s[src_idx + 0 + j]*id;
+        const float x1 = data_s[src_idx + QUANT_K_IQ4_NL/2 + j]*id;
+        const uint xi0 = best_index(x0);
+        const uint xi1 = best_index(x1);
+        data_q[dst_idx].qs[j] = uint8_t(xi0 | (xi1 << 4));
+        const float v0 = kvalues_iq4nl[xi0];
+        const float v1 = kvalues_iq4nl[xi1];
+        const float w0 = data_s[src_idx + 0 + j]*data_s[src_idx + 0 + j];
+        const float w1 = data_s[src_idx + QUANT_K_IQ4_NL/2 + j]*data_s[src_idx + QUANT_K_IQ4_NL/2 + j];
+        sumqx += w0*v0*data_s[src_idx + j] + w1*v1*data_s[src_idx + QUANT_K_IQ4_NL/2 + j];
+        sumq2 += w0*v0*v0 + w1*v1*v1;
+    }
+
+    data_q[dst_idx].d = float16_t(sumq2 > 0 ? sumqx/sumq2 : d);
+
+}
+#endif
+
+void main() {
+#if defined(DATA_A_IQ4_NL)
+    init_iq4nl_shmem();
+    if (gl_LocalInvocationIndex.x != 0) {
+        return;
+    }
+#endif
+
+    const uint idx = gl_WorkGroupID.z * 262144 + gl_WorkGroupID.y * 512 + gl_WorkGroupID.x * QUANT_K;
+
+    if (idx >= p.ne) {
+        return;
+    }
+
+    uint dst_idx = dst_idx_quant(idx, QUANT_K);
+    uint src_idx = get_aoffset() + src0_idx(idx);
+
+    quantize(dst_idx, src_idx);
+}
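Of the quantizers above, the Q8_0 path is the simplest to reason about: take the absolute maximum of the block, set d = amax/127, then round each value divided by d to a signed byte. A host-side sketch of that logic under the assumption of a 32-wide block, mirroring the shader's arithmetic (names are illustrative):

    #include <cmath>
    #include <cstdint>

    // Quantize 32 floats to Q8_0-style codes: one scale plus int8 values.
    // Mirrors the shader's d = amax / ((1 << 7) - 1) and round(x / d).
    static void quantize_q8_0_ref(const float * x, float & d, std::int8_t * qs) {
        float amax = 0.0f;
        for (int j = 0; j < 32; ++j) {
            amax = std::fmax(amax, std::fabs(x[j]));
        }
        d = amax / 127.0f;
        const float id = (d != 0.0f) ? 1.0f / d : 0.0f;
        for (int j = 0; j < 32; ++j) {
            qs[j] = static_cast<std::int8_t>(std::lround(x[j] * id));
        }
    }

The other formats follow the same shape, differing only in how d (and, where present, the minimum m or high-bit plane qh) is chosen before the per-value rounding.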
@@ -101,19 +101,25 @@ layout(buffer_reference, std430, buffer_reference_align = 4) buffer decodeBufQ2_
     block_q2_K block;
 };

+layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ2_K_packed16 {
+    block_q2_K_packed16 block;
+};
+
 float16_t dequantFuncQ2_K(const in decodeBufQ2_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
 {
+    decodeBufQ2_K_packed16 bl16 = decodeBufQ2_K_packed16(bl);
     const f16vec2 d = bl.block.d;
     const uint idx = coordInBlock[1];
-    const uint iqs = idx;
-
-    const uint qsi = (iqs / 128) * 32 + (iqs % 32); // 0..31
-    const uint scalesi = iqs / 16; // 0..15
-    const uint qsshift = ((iqs % 128) / 32) * 2; // 0,2,4,6
+    const uint scalesi = (idx & 0xF0) >> 4; // 0..15
+    const uint qsshift = (idx & 0x60) >> 4; // 0,2,4,6
+
+    uint qs = uint32_t(bl16.block.qs[((idx & 0x80) >> 3) + ((idx & 0x1E) >> 1)]);
+    qs = (qs >> qsshift) & 0x0303;
+    qs = unpack8(qs)[idx & 1];

-    uint32_t qs = bl.block.qs[qsi];
     const uint scales = bl.block.scales[scalesi];
-    float16_t ret = d.x * float16_t(scales & 0xF) * float16_t((qs >> qsshift) & 3) - d.y * float16_t(scales >> 4);
+    float16_t ret = d.x * float16_t(scales & 0xF) * float16_t(qs) - d.y * float16_t(scales >> 4);
     return ret;
 }

@@ -157,39 +163,47 @@ layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ4
     block_q4_K_packed16 block;
 };

+layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ4_K_packed128 {
+    block_q4_K_packed128 block;
+};
+
 float16_t dequantFuncQ4_K(const in decodeBufQ4_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
 {
     decodeBufQ4_K_packed16 bl16 = decodeBufQ4_K_packed16(bl);
+    decodeBufQ4_K_packed128 bl128 = decodeBufQ4_K_packed128(bl);
     const uint idx = coordInBlock[1];

     const uint b = (idx & 0x20) >> 5; // 0,1
     const uint is = (idx & 0xE0) >> 5; // 0..7

-    const f16vec2 loadd = bl.block.d;
+    uvec4 v = bl128.block.q4k[0];
+
+    const f16vec2 loadd = unpackFloat2x16(v.x);

     uint32_t sc;
     uint32_t mbyte;

-    uint32_t scidx0 = (is < 4) ? is : (is + 4);
-    uint32_t scidx1 = (is < 4) ? is : (is - 4);
-    uint32_t scidxmask1 = (is < 4) ? 0x30 : 0xC0;
-    uint32_t scidxshift1 = (is < 4) ? 0 : 2;
-    uint32_t mbidx0 = is + 4;
-    uint32_t mbidx1 = (is < 4) ? is + 4 : is;
-    uint32_t mbidxmask0 = (is < 4) ? 0xF : 0xF0;
-    uint32_t mbidxshift0 = (is < 4) ? 0 : 4;
-    uint32_t mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
-    uint32_t mbidxshift1 = (is < 4) ? 0 : 2;
+    uint32_t scale0 = v.y;
+    uint32_t scale4 = v.z;
+    uint32_t scale8 = v.w;

-    sc = uint8_t((bl.block.scales[scidx0] & 0xF) | ((bl.block.scales[scidx1] & scidxmask1) >> scidxshift1));
-    mbyte = uint8_t(((bl.block.scales[mbidx0] & mbidxmask0) >> mbidxshift0) | ((bl.block.scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+    uint32_t sc_lo = scale0;
+    uint32_t mb_lo = scale4;
+    uint32_t sc_hi = (scale8 & 0x0F0F0F0F) | ((scale0 & 0xC0C0C0C0) >> 2);
+    uint32_t mb_hi = ((scale8 & 0xF0F0F0F0) >> 4) | ((scale4 & 0xC0C0C0C0) >> 2);
+
+    sc = is < 4 ? sc_lo : sc_hi;
+    mbyte = is < 4 ? mb_lo : mb_hi;
+    sc = sc >> (8 * (is & 3));
+    mbyte = mbyte >> (8 * (is & 3));
+    sc &= 0x3F;
+    mbyte &= 0x3F;

     const float16_t d = loadd.x * float16_t(sc);
     const float16_t m = loadd.y * float16_t(mbyte);

     uint qs = uint32_t(bl16.block.qs[((idx & 0xC0) >> 2) + ((idx & 0x1E) >> 1)]);
-    qs = (qs >> (b * 4)) & 0x0F0F;
-    qs = unpack8(qs)[idx & 1];
+    qs = (qs >> (b * 4 + 8 * (idx & 1))) & 0xF;

     float16_t ret = d * float16_t(qs) - m;

@@ -204,47 +218,53 @@ layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ5
     block_q5_K_packed16 block;
 };

+layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ5_K_packed128 {
+    block_q5_K_packed128 block;
+};
+
 float16_t dequantFuncQ5_K(const in decodeBufQ5_K bl, const in uint blockCoords[2], const in uint coordInBlock[2])
 {
     decodeBufQ5_K_packed16 bl16 = decodeBufQ5_K_packed16(bl);
+    decodeBufQ5_K_packed128 bl128 = decodeBufQ5_K_packed128(bl);
     const uint idx = coordInBlock[1];

     const uint b = (idx & 0x20) >> 5; // 0,1
     const uint is = (idx & 0xE0) >> 5; // 0..7

-    const uint32_t hm = 0x0101 << is;
+    uvec4 v = bl128.block.q5k[0];

-    const f16vec2 loadd = bl.block.d;
+    const f16vec2 loadd = unpackFloat2x16(v.x);

     uint32_t sc;
     uint32_t mbyte;

-    uint32_t scidx0 = (is < 4) ? is : (is + 4);
-    uint32_t scidx1 = (is < 4) ? is : (is - 4);
-    uint32_t scidxmask1 = (is < 4) ? 0x30 : 0xC0;
-    uint32_t scidxshift1 = (is < 4) ? 0 : 2;
-    uint32_t mbidx0 = is + 4;
-    uint32_t mbidx1 = (is < 4) ? is + 4 : is;
-    uint32_t mbidxmask0 = (is < 4) ? 0xF : 0xF0;
-    uint32_t mbidxshift0 = (is < 4) ? 0 : 4;
-    uint32_t mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
-    uint32_t mbidxshift1 = (is < 4) ? 0 : 2;
+    uint32_t scale0 = v.y;
+    uint32_t scale4 = v.z;
+    uint32_t scale8 = v.w;

-    sc = uint8_t((bl.block.scales[scidx0] & 0xF) | ((bl.block.scales[scidx1] & scidxmask1) >> scidxshift1));
-    mbyte = uint8_t(((bl.block.scales[mbidx0] & mbidxmask0) >> mbidxshift0) | ((bl.block.scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+    uint32_t sc_lo = scale0;
+    uint32_t mb_lo = scale4;
+    uint32_t sc_hi = (scale8 & 0x0F0F0F0F) | ((scale0 & 0xC0C0C0C0) >> 2);
+    uint32_t mb_hi = ((scale8 & 0xF0F0F0F0) >> 4) | ((scale4 & 0xC0C0C0C0) >> 2);
+
+    sc = is < 4 ? sc_lo : sc_hi;
+    mbyte = is < 4 ? mb_lo : mb_hi;
+    sc = sc >> (8 * (is & 3));
+    mbyte = mbyte >> (8 * (is & 3));
+    sc &= 0x3F;
+    mbyte &= 0x3F;

     const float16_t d = loadd.x * float16_t(sc);
     const float16_t m = loadd.y * float16_t(mbyte);

     uint qh = uint32_t(bl16.block.qh[(idx & 0x1E) >> 1]);
-    qh = qh & hm;
-    qh = unpack8(qh)[idx & 1];
+    qh = ((qh >> is) & 0x101) << 4;

     uint qs = uint32_t(bl16.block.qs[((idx & 0xC0) >> 2) + ((idx & 0x1E) >> 1)]);
     qs = (qs >> (b * 4)) & 0x0F0F;
-    qs = unpack8(qs)[idx & 1];
+    qs = unpack8(qs | qh)[idx & 1];

-    float16_t ret = d * (float16_t(qs) + (qh != 0 ? float16_t(16) : float16_t(0))) - m;
+    float16_t ret = d * (float16_t(qs)) - m;

     return ret;
 }
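The reworked dequantFuncQ4_K/Q5_K load the first 16 bytes of a block as a single uvec4 and rebuild the 6-bit scale/min pair for sub-block is with shifts and masks rather than per-byte gathers. A scalar C++ sketch of that unpacking, following the word names used in the diff; everything else is illustrative:

    #include <cstdint>

    // K-quant blocks store eight 6-bit scales and eight 6-bit mins in 12 bytes.
    // Given those 12 bytes as three 32-bit words, recover (scale, min) for
    // sub-block `is` in 0..7 the same way the shader does.
    static void unpack_q4k_scale_min(std::uint32_t scale0, std::uint32_t scale4, std::uint32_t scale8,
                                     int is, std::uint32_t & sc, std::uint32_t & mbyte) {
        const std::uint32_t sc_lo = scale0;
        const std::uint32_t mb_lo = scale4;
        const std::uint32_t sc_hi = (scale8 & 0x0F0F0F0F) | ((scale0 & 0xC0C0C0C0) >> 2);
        const std::uint32_t mb_hi = ((scale8 & 0xF0F0F0F0) >> 4) | ((scale4 & 0xC0C0C0C0) >> 2);

        sc    = (is < 4 ? sc_lo : sc_hi) >> (8 * (is & 3));
        mbyte = (is < 4 ? mb_lo : mb_hi) >> (8 * (is & 3));
        sc    &= 0x3F;
        mbyte &= 0x3F;
    }

For the first four sub-blocks the scale and min bytes are used as stored; for the last four, their low nibbles live in the third word and their top two bits are borrowed from the first two words, which is what the sc_hi/mb_hi expressions reassemble.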
@@ -54,3 +54,23 @@ uint dst_idx(uint idx) {
     const uint i10 = idx - i13_offset - i12_offset - i11*p.ne10;
     return i13*p.nb13 + i12*p.nb12 + i11*p.nb11 + i10*p.nb10;
 }
+
+uint src0_idx_quant(uint idx, uint qk) {
+    const uint i03 = fastdiv(idx, p.ne0_012mp, p.ne0_012L);
+    const uint i03_offset = i03 * p.ne02*p.ne01*p.ne00;
+    const uint i02 = fastdiv(idx - i03_offset, p.ne0_01mp, p.ne0_01L);
+    const uint i02_offset = i02*p.ne01*p.ne00;
+    const uint i01 = fastdiv(idx - i03_offset - i02_offset, p.ne0_0mp, p.ne0_0L);
+    const uint i00 = idx - i03_offset - i02_offset - i01*p.ne00;
+    return i03*p.nb03 + i02*p.nb02 + i01*p.nb01 + (i00/qk)*p.nb00;
+}
+
+uint dst_idx_quant(uint idx, uint qk) {
+    const uint i13 = fastdiv(idx, p.ne1_012mp, p.ne1_012L);
+    const uint i13_offset = i13 * p.ne12*p.ne11*p.ne10;
+    const uint i12 = fastdiv(idx - i13_offset, p.ne1_01mp, p.ne1_01L);
+    const uint i12_offset = i12*p.ne11*p.ne10;
+    const uint i11 = fastdiv(idx - i13_offset - i12_offset, p.ne1_0mp, p.ne1_0L);
+    const uint i10 = idx - i13_offset - i12_offset - i11*p.ne10;
+    return i13*p.nb13 + i12*p.nb12 + i11*p.nb11 + (i10/qk)*p.nb10;
+}
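src0_idx_quant and dst_idx_quant decompose a flat element index into 4D coordinates and then divide only the innermost coordinate by the block size, so the returned byte offset points at the start of the quant block containing that element. A plain-division sketch of the same decomposition (the shader replaces / and % with precomputed fastdiv constants):

    #include <cstdint>

    // Map a flat index to a byte offset for a tensor with extents ne[0..3]
    // and strides nb[0..3], addressing the start of the qk-wide block
    // that contains innermost element i0.
    static std::uint64_t idx_quant_ref(std::uint64_t idx,
                                       const std::uint64_t ne[4], const std::uint64_t nb[4],
                                       std::uint64_t qk) {
        const std::uint64_t i3 = idx / (ne[2] * ne[1] * ne[0]);
        idx -= i3 * ne[2] * ne[1] * ne[0];
        const std::uint64_t i2 = idx / (ne[1] * ne[0]);
        idx -= i2 * ne[1] * ne[0];
        const std::uint64_t i1 = idx / ne[0];
        const std::uint64_t i0 = idx - i1 * ne[0];
        return i3 * nb[3] + i2 * nb[2] + i1 * nb[1] + (i0 / qk) * nb[0];
    }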
@@ -227,6 +227,11 @@ struct block_q4_K_packed32
     uint32_t qs[QUANT_K_Q4_K/2/4];
 };

+struct block_q4_K_packed128
+{
+    uvec4 q4k[9];
+};
+
 #if defined(DATA_A_Q4_K)
 #define QUANT_K QUANT_K_Q4_K
 #define A_TYPE block_q4_K

@@ -252,6 +257,11 @@ struct block_q5_K_packed16
     uint16_t qs[QUANT_K_Q5_K/2/2];
 };

+struct block_q5_K_packed128
+{
+    uvec4 q5k[11];
+};
+
 #if defined(DATA_A_Q5_K)
 #define QUANT_K QUANT_K_Q5_K
 #define A_TYPE block_q5_K
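block_q4_K_packed128 and block_q5_K_packed128 are simply 16-byte-vector views of the existing K-quant blocks: a Q4_K block is 2+2 bytes of super-block scales, 12 bytes of packed 6-bit scales/mins and 128 bytes of 4-bit codes, i.e. 144 bytes = 9 uvec4, and Q5_K adds 32 bytes of high bits for 176 bytes = 11 uvec4. A size check expressing that arithmetic with simplified stand-in structs (not the repository's definitions):

    #include <cstdint>

    // Simplified stand-ins for the K-quant block layouts (256 weights per block).
    struct ExampleBlockQ4K { std::uint16_t d, dmin; std::uint8_t scales[12]; std::uint8_t qs[128]; };
    struct ExampleBlockQ5K { std::uint16_t d, dmin; std::uint8_t scales[12]; std::uint8_t qh[32]; std::uint8_t qs[128]; };

    static_assert(sizeof(ExampleBlockQ4K) == 9 * 16,  "Q4_K block is nine 16-byte vectors");
    static_assert(sizeof(ExampleBlockQ5K) == 11 * 16, "Q5_K block is eleven 16-byte vectors");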
@@ -425,6 +425,11 @@ void process_shaders() {
     string_to_spv("contig_cpy_f32_f16", "contig_copy.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}});
     string_to_spv("contig_cpy_f16_f16", "contig_copy.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"OPTIMIZATION_ERROR_WORKAROUND", "1"}});

+    for (std::string t : {"q4_0", "q4_1", "q5_0", "q5_1", "q8_0", "iq4_nl"}) {
+        string_to_spv("cpy_f32_" + t, "copy_to_quant.comp", {{"DATA_A_" + to_uppercase(t), "1"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+        string_to_spv("cpy_" + t + "_f32", "copy_from_quant.comp", {{"DATA_A_" + to_uppercase(t), "1"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+    }
+
     string_to_spv("add_f32", "add.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
     string_to_spv("add_f16_f32_f16", "add.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float16_t"}, {"FLOAT_TYPE", "float"}});
@@ -3463,12 +3463,14 @@ struct ggml_tensor * ggml_soft_max_ext(
     return ggml_soft_max_impl(ctx, a, mask, scale, max_bias, false);
 }

-// ggml_soft_max_back
+// ggml_soft_max_ext_back

-static struct ggml_tensor * ggml_soft_max_back_impl(
+static struct ggml_tensor * ggml_soft_max_ext_back_impl(
         struct ggml_context * ctx,
         struct ggml_tensor * a,
         struct ggml_tensor * b,
+        float scale,
+        float max_bias,
         bool inplace) {
     struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

@@ -3476,21 +3478,28 @@ static struct ggml_tensor * ggml_soft_max_back_impl(
     result->src[0] = a;
     result->src[1] = b;

+    memcpy((float *) result->op_params + 0, &scale, sizeof(float));
+    memcpy((float *) result->op_params + 1, &max_bias, sizeof(float));
+
     return result;
 }

-struct ggml_tensor * ggml_soft_max_back(
+struct ggml_tensor * ggml_soft_max_ext_back(
         struct ggml_context * ctx,
         struct ggml_tensor * a,
-        struct ggml_tensor * b) {
-    return ggml_soft_max_back_impl(ctx, a, b, false);
+        struct ggml_tensor * b,
+        float scale,
+        float max_bias) {
+    return ggml_soft_max_ext_back_impl(ctx, a, b, scale, max_bias, false);
 }

-struct ggml_tensor * ggml_soft_max_back_inplace(
+struct ggml_tensor * ggml_soft_max_ext_back_inplace(
         struct ggml_context * ctx,
         struct ggml_tensor * a,
-        struct ggml_tensor * b) {
-    return ggml_soft_max_back_impl(ctx, a, b, true);
+        struct ggml_tensor * b,
+        float scale,
+        float max_bias) {
+    return ggml_soft_max_ext_back_impl(ctx, a, b, scale, max_bias, true);
 }

 // ggml_rope

@@ -5089,10 +5098,10 @@ struct ggml_tensor * ggml_cross_entropy_loss_back(
         struct ggml_tensor * a,
         struct ggml_tensor * b,
         struct ggml_tensor * c) {
-    GGML_ASSERT(ggml_are_same_shape(a, b));
-    GGML_ASSERT(ggml_is_scalar(c));
+    GGML_ASSERT(ggml_is_scalar(a));
+    GGML_ASSERT(ggml_are_same_shape(b, c));

-    struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
+    struct ggml_tensor * result = ggml_dup_tensor(ctx, b);

     result->op = GGML_OP_CROSS_ENTROPY_LOSS_BACK;
     result->src[0] = a;

@@ -5271,7 +5280,7 @@ static void ggml_sub_or_set(
 }

 static void ggml_compute_backward(
-        struct ggml_context * ctx, struct ggml_cgraph * cgraph, int i, bool * grads_needed) {
+        struct ggml_context * ctx, struct ggml_cgraph * cgraph, int i, const bool * grads_needed) {
     struct ggml_tensor * tensor = cgraph->nodes[i];
     struct ggml_tensor * grad = ggml_graph_get_grad(cgraph, tensor);

@@ -5415,7 +5424,7 @@ static void ggml_compute_backward(
             if (src0_needs_grads) {
                 float eps;
                 memcpy(&eps, tensor->op_params, sizeof(float));
-                ggml_add_or_set(ctx, cgraph, isrc0, ggml_rms_norm_back(ctx, src0, grad, eps));
+                ggml_add_or_set(ctx, cgraph, isrc0, ggml_rms_norm_back(ctx, grad, src0, eps));
             }
         } break;
         case GGML_OP_MUL_MAT: {

@@ -5598,7 +5607,13 @@ static void ggml_compute_backward(
         } break;
         case GGML_OP_SOFT_MAX: {
             if (src0_needs_grads) {
-                ggml_add_or_set(ctx, cgraph, isrc0, ggml_soft_max_back(ctx, grad, tensor));
+                float scale = 1.0f;
+                float max_bias = 0.0f;
+
+                memcpy(&scale, (const float *) tensor->op_params + 0, sizeof(float));
+                memcpy(&max_bias, (const float *) tensor->op_params + 1, sizeof(float));
+
+                ggml_add_or_set(ctx, cgraph, isrc0, ggml_soft_max_ext_back(ctx, grad, tensor, scale, max_bias));
             }
             GGML_ASSERT((!src1 || !src1_needs_grads) && "backward pass for softmax mask not implemented");
         } break;

@@ -5639,7 +5654,7 @@ static void ggml_compute_backward(
             const int32_t d1 = ggml_get_op_params_i32(tensor, 5);
             const bool is_2D = ggml_get_op_params_i32(tensor, 6) == 1;

-            ggml_add_or_set(ctx, cgraph, isrc1, ggml_im2col_back(ctx, src0, grad, src1->ne, s0, s1, p0, p1, d0, d1, is_2D));
+            ggml_add_or_set(ctx, cgraph, isrc1, ggml_im2col_back(ctx, grad, src0, src1->ne, s0, s1, p0, p1, d0, d1, is_2D));
             }
         } break;
         case GGML_OP_POOL_2D: {

@@ -5682,7 +5697,7 @@ static void ggml_compute_backward(
         } break;
         case GGML_UNARY_OP_SILU: {
             if (src0_needs_grads) {
-                ggml_add_or_set(ctx, cgraph, isrc0, ggml_silu_back(ctx, src0, grad));
+                ggml_add_or_set(ctx, cgraph, isrc0, ggml_silu_back(ctx, grad, src0));
             }
         } break;
         case GGML_UNARY_OP_EXP: {

@@ -5699,7 +5714,7 @@ static void ggml_compute_backward(
         } break;
         case GGML_OP_CROSS_ENTROPY_LOSS: {
             if (src0_needs_grads) {
-                ggml_add_or_set(ctx, cgraph, isrc0, ggml_cross_entropy_loss_back(ctx, src0, src1, grad));
+                ggml_add_or_set(ctx, cgraph, isrc0, ggml_cross_entropy_loss_back(ctx, grad, src0, src1));
             }
             GGML_ASSERT(!src1_needs_grads && "backward pass for labels not implemented");
         } break;
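ggml_soft_max_ext_back carries scale and max_bias the same way the forward op does: the two floats are memcpy'd into the tensor's int32 op_params array at build time and memcpy'd back out where the backward pass needs them. A small sketch of that packing convention; the struct and array size here are illustrative, not the ggml definitions:

    #include <cstdint>
    #include <cstring>

    // ggml-style op_params: a small int32 array reused as raw storage.
    // Floats are copied in and out with memcpy to avoid type-punning issues.
    struct ExampleOpParams {
        std::int32_t op_params[16];
    };

    static void set_softmax_params(ExampleOpParams & t, float scale, float max_bias) {
        std::memcpy((float *) t.op_params + 0, &scale, sizeof(float));
        std::memcpy((float *) t.op_params + 1, &max_bias, sizeof(float));
    }

    static void get_softmax_params(const ExampleOpParams & t, float & scale, float & max_bias) {
        std::memcpy(&scale, (const float *) t.op_params + 0, sizeof(float));
        std::memcpy(&max_bias, (const float *) t.op_params + 1, sizeof(float));
    }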
@@ -290,9 +290,6 @@
         // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
         const float * tensor_split;

-        // comma separated list of RPC servers to use for offloading
-        const char * rpc_servers;
-
         // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
         // If the provided progress_callback returns true, model loading continues.
         // If it returns false, model loading is immediately aborted.

@@ -420,10 +417,20 @@
             struct llama_model_params params),
             "use llama_model_load_from_file instead");

+    // Load the model from a file
+    // If the file is split into multiple parts, the file name must follow this pattern: <name>-%05d-of-%05d.gguf
+    // If the split file name does not follow this pattern, use llama_model_load_from_splits
     LLAMA_API struct llama_model * llama_model_load_from_file(
             const char * path_model,
             struct llama_model_params params);

+    // Load the model from multiple splits (support custom naming scheme)
+    // The paths must be in the correct order
+    LLAMA_API struct llama_model * llama_model_load_from_splits(
+            const char ** paths,
+            size_t n_paths,
+            struct llama_model_params params);
+
     DEPRECATED(LLAMA_API void llama_free_model(struct llama_model * model),
             "use llama_model_free instead");

@@ -953,7 +960,7 @@
     LLAMA_API llama_token llama_vocab_fim_rep(const struct llama_vocab * vocab);
     LLAMA_API llama_token llama_vocab_fim_sep(const struct llama_vocab * vocab);

-    DEPRECATED(LLAMA_API const char * llama_token_get_text(const struct llama_vocab * vocab, llama_token token), "use llama_vocabable_get_text instead");
+    DEPRECATED(LLAMA_API const char * llama_token_get_text(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_text instead");
     DEPRECATED(LLAMA_API float llama_token_get_score(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_score instead");
     DEPRECATED(LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_attr instead");
     DEPRECATED(LLAMA_API bool llama_token_is_eog(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_eog instead");
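The new llama_model_load_from_splits entry point takes an explicit, ordered list of split paths instead of deriving them from the <name>-%05d-of-%05d.gguf pattern. A hedged usage sketch, assuming llama_model_default_params and llama_model_free from the same header; the file names are made up and error handling is reduced to returning the pointer:

    #include <cstddef>
    #include "llama.h" // llama.cpp public header

    // Load a model that was saved as two custom-named split files.
    // The paths must already be in the correct order.
    static llama_model * load_split_model_example() {
        const char * paths[] = {
            "models/example-part-a.gguf", // hypothetical file names
            "models/example-part-b.gguf",
        };
        llama_model_params params = llama_model_default_params();
        llama_model * model = llama_model_load_from_splits(paths, 2, params);
        return model; // nullptr on failure; release later with llama_model_free(model)
    }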
koboldcpp.py (12 changed lines)
@@ -2933,7 +2933,7 @@ def RunServerMultiThreaded(addr, port):
     while 1:
         try:
             time.sleep(10)
-        except KeyboardInterrupt:
+        except (KeyboardInterrupt,SystemExit):
             global exitcounter
             exitcounter = 999
             for i in range(numThreads):

@@ -4177,7 +4177,7 @@ def show_gui():
     ctk.CTkButton(tabs , text = "Update", fg_color="#9900cc", hover_color="#aa11dd", command = display_updates, width=90, height = 35 ).grid(row=1,column=0, stick="sw", padx= 5, pady=5)
     ctk.CTkButton(tabs , text = "Save", fg_color="#084a66", hover_color="#085a88", command = save_config_gui, width=60, height = 35 ).grid(row=1,column=1, stick="sw", padx= 5, pady=5)
     ctk.CTkButton(tabs , text = "Load", fg_color="#084a66", hover_color="#085a88", command = load_config_gui, width=60, height = 35 ).grid(row=1,column=1, stick="sw", padx= 70, pady=5)
-    ctk.CTkButton(tabs , text = "Help", fg_color="#992222", hover_color="#bb3333", command = display_help, width=60, height = 35 ).grid(row=1,column=1, stick="sw", padx= 135, pady=5)
+    ctk.CTkButton(tabs , text = "Help (Find Models)", fg_color="#992222", hover_color="#bb3333", command = display_help, width=100, height = 35 ).grid(row=1,column=1, stick="sw", padx= 135, pady=5)

     # start a thread that tries to get actual gpu names and layer counts
     gpuinfo_thread = threading.Thread(target=auto_set_backend_gui)

@@ -4189,7 +4189,13 @@ def show_gui():
     import_vars(dict)

     # runs main loop until closed or launch clicked
-    root.mainloop()
+    try:
+        root.mainloop()
+    except (KeyboardInterrupt,SystemExit):
+        exitcounter = 999
+        print("Exiting by user request.")
+        sys.exit(0)

     if nextstate==0:
         exitcounter = 999
@@ -68,6 +68,33 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
     }
 }

+// return a list of splits for a given path
+// for example, given "<name>-00002-of-00004.gguf", returns list of all 4 splits
+static std::vector<std::string> llama_get_list_splits(const std::string & path, const int idx, const int n_split) {
+    std::vector<std::string> paths;
+    std::string split_prefix;
+    std::vector<char> buf(llama_path_max(), 0);
+
+    {
+        int ret = llama_split_prefix(buf.data(), buf.size(), path.c_str(), idx, n_split);
+        if (!ret) {
+            throw std::runtime_error(format("invalid split file name: %s", path.c_str()));
+        }
+        split_prefix = std::string(buf.data(), ret);
+    }
+
+    if (split_prefix.empty()) {
+        throw std::runtime_error(format("invalid split file: %s", path.c_str()));
+    }
+
+    for (int idx = 0; idx < n_split; ++idx) {
+        int ret = llama_split_path(buf.data(), buf.size(), split_prefix.c_str(), idx, n_split);
+        paths.push_back(std::string(buf.data(), ret));
+    }
+
+    return paths;
+}
+
 namespace GGUFMeta {
     template <typename T, gguf_type gt_, T (*gfun)(const gguf_context *, const int64_t)>
     struct GKV_Base_Type {
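The helper added above builds on the public split-path utilities that already exist in llama.h, `llama_split_prefix` and `llama_split_path`. A small standalone sketch of that round trip follows; the file name is hypothetical and the buffer size is arbitrary.

```cpp
// Sketch: recover the prefix from one shard name, then regenerate all shard names.
// "my-model-00002-of-00004.gguf" is a hypothetical example path.
#include <cstdio>
#include "llama.h"

int main() {
    char prefix[512];
    char path[512];

    // prefix becomes "my-model" when the name matches split index 1 (file 00002) of 4
    int n = llama_split_prefix(prefix, sizeof(prefix), "my-model-00002-of-00004.gguf", /*split_no=*/1, /*split_count=*/4);
    if (n <= 0) {
        return 1; // not a valid split file name for that index/count
    }

    for (int i = 0; i < 4; ++i) {
        llama_split_path(path, sizeof(path), prefix, i, 4);
        printf("%s\n", path); // my-model-00001-of-00004.gguf, ...
    }
    return 0;
}
```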
@@ -417,7 +444,12 @@ namespace GGUFMeta {
 template bool llama_model_loader::get_key_or_arr<std::array<int, 4>>(enum llm_kv kid, std::array<int, 4> & result, uint32_t n, bool required);
 template bool llama_model_loader::get_key_or_arr<std::array<uint32_t, 512>>(enum llm_kv kid, std::array<uint32_t, 512> & result, uint32_t n, bool required);

-llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, const struct llama_model_kv_override * param_overrides_p) {
+llama_model_loader::llama_model_loader(
+        const std::string & fname,
+        std::vector<std::string> & splits,
+        bool use_mmap,
+        bool check_tensors,
+        const struct llama_model_kv_override * param_overrides_p) {
     int trace = 0;
     if (getenv("LLAMA_TRACE")) {
         trace = atoi(getenv("LLAMA_TRACE"));
@@ -429,6 +461,7 @@ llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap,
         }
     }

+    // Load the main GGUF
     struct ggml_context * ctx = NULL;
     struct gguf_init_params params = {
         /*.no_alloc = */ true,
@@ -464,35 +497,54 @@ llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap,

     // Load additional GGML contexts
     if (n_split > 1) {
+        // make sure the main file is loaded first
         uint16_t idx = 0;
-        get_key(llm_kv(LLM_KV_SPLIT_NO), idx);
+        const std::string kv_split_no = llm_kv(LLM_KV_SPLIT_NO);
+        get_key(kv_split_no, idx);
         if (idx != 0) {
-            throw std::runtime_error(format("illegal split file: %d, model must be loaded with the first split", idx));
+            throw std::runtime_error(format("illegal split file idx: %d (file: %s), model must be loaded with the first split", idx, fname.c_str()));
         }

-        std::vector<char> split_prefix(llama_path_max(), 0);
-        if (!llama_split_prefix(split_prefix.data(), split_prefix.size(), fname.c_str(), idx, n_split)) {
-            throw std::runtime_error(format("invalid split file: %s", fname.c_str()));
+        // generate list of splits if needed
+        if (splits.empty()) {
+            splits = llama_get_list_splits(fname, idx, n_split);
+        }
+
+        // in case user give a custom list of splits, check if it matches the expected number
+        if (n_split != (uint16_t)splits.size()) {
+            throw std::runtime_error(format("invalid split count, given: %zu splits, but expected %d", splits.size(), n_split));
         }

         if (trace > 0) {
             LLAMA_LOG_INFO("%s: loading additional %d GGUFs\n", __func__, n_split);
         }

-        std::vector<char> split_path(llama_path_max(), 0);
+        // load other splits
         for (idx = 1; idx < n_split; idx++) {
-            llama_split_path(split_path.data(), split_path.size(), split_prefix.data(), idx, n_split);
+            const char * fname_split = splits[idx].c_str();

             struct gguf_init_params split_params = {
                 /*.no_alloc = */ true,
                 /*.ctx = */ &ctx,
             };
-            gguf_context_ptr ctx_gguf { gguf_init_from_file(split_path.data(), split_params) };
+            gguf_context_ptr ctx_gguf { gguf_init_from_file(fname_split, split_params) };
             if (!ctx_gguf) {
-                throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, split_path.data()));
+                throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, fname_split));
             }

-            files.emplace_back(new llama_file(split_path.data(), "rb"));
+            // check idx
+            {
+                const int kid = gguf_find_key(ctx_gguf.get(), kv_split_no.c_str());
+                if (kid < 0) {
+                    throw std::runtime_error(format("missing key %s in GGUF split %s", kv_split_no.c_str(), fname_split));
+                }
+                int idx_gguf = gguf_get_val_u16(ctx_gguf.get(), kid);
+                if (idx_gguf != idx) {
+                    throw std::runtime_error(format("invalid split file idx: %d (file: %s), expected %d", idx_gguf, fname_split, idx));
+                }
+            }
+
+            files.emplace_back(new llama_file(fname_split, "rb"));
             contexts.emplace_back(ctx);

             // Save tensors data offset info of the shard.
@@ -90,7 +90,12 @@ struct llama_model_loader {
     size_t size_data = 0;
     std::vector<std::pair<size_t, size_t>> mmaps_used;

-    llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, const struct llama_model_kv_override * param_overrides_p);
+    llama_model_loader(
+        const std::string & fname,
+        std::vector<std::string> & splits, // optional, only need if the split does not follow naming scheme
+        bool use_mmap,
+        bool check_tensors,
+        const struct llama_model_kv_override * param_overrides_p);

     template<typename T>
     typename std::enable_if<std::is_integral<T>::value, bool>::type
@@ -3816,7 +3816,6 @@ struct llama_model_params llama_model_default_params() {
         /*.split_mode =*/ LLAMA_SPLIT_MODE_LAYER,
         /*.main_gpu =*/ 0,
         /*.tensor_split =*/ nullptr,
-        /*.rpc_servers =*/ nullptr,
         /*.progress_callback =*/ nullptr,
         /*.progress_callback_user_data =*/ nullptr,
         /*.kv_overrides =*/ nullptr,
@@ -323,8 +323,6 @@ struct llama_model {
     // gguf metadata
     std::unordered_map<std::string, std::string> gguf_kv;

-    std::vector<std::string> rpc_servers;
-
     // list of devices used in this model
     std::vector<ggml_backend_dev_t> devices;

@@ -529,7 +529,8 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         kv_overrides = v->data();
     }

-    llama_model_loader ml(fname_inp, use_mmap, /*check_tensors*/ true, kv_overrides);
+    std::vector<std::string> splits = {};
+    llama_model_loader ml(fname_inp, splits, use_mmap, /*check_tensors*/ true, kv_overrides);
     ml.init_mappings(false); // no prefetching

     llama_model model(llama_model_default_params());
@@ -664,7 +664,7 @@ struct llm_tokenizer_bpe_session {
     //             "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
     //             "Are you sure this is what you want?\n", __FUNCTION__);
     //     }
-    //     if (vocab.get_add_bos() && output.size() >= 2 && *(output.end()-2) == vocab.token_eos()) {
+    //     if (vocab.get_add_eos() && output.size() >= 2 && *(output.end()-2) == vocab.token_eos()) {
     //         LLAMA_LOG_WARN(
     //             "%s: Added a EOS token to the prompt as specified by the model but the prompt "
     //             "also ends with a EOS token. So now the final prompt ends with 2 EOS tokens. "
@@ -3537,4 +3537,4 @@ const std::unordered_map<std::string, llama_token> & llama_vocab::get_token_to_i
 const std::vector<llama_vocab::token_data> & llama_vocab::get_id_to_token() const
 {
     return pimpl->id_to_token;
 }
@@ -49,7 +49,7 @@ static bool old_mixtral_warning_showed = false;
 #endif

 // Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
-static int llama_model_load(const std::string & fname, llama_model & model, llama_model_params & params) {
+static int llama_model_load(const std::string & fname, std::vector<std::string> & splits, llama_model & model, llama_model_params & params) {
     // loading time will be recalculated after the first eval, so
     // we take page faults deferred by mmap() into consideration
     model.t_load_us = 0;
@@ -58,7 +58,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
     model.t_start_us = tm.t_start_us;

     try {
-        llama_model_loader ml(fname, params.use_mmap, params.check_tensors, params.kv_overrides);
+        llama_model_loader ml(fname, splits, params.use_mmap, params.check_tensors, params.kv_overrides);

         ml.print_info();

@@ -9414,14 +9414,9 @@ int64_t llama_time_us(void) {
     return ggml_time_us();
 }

-struct llama_model * llama_load_model_from_file(
-        const char * path_model,
-        struct llama_model_params params) {
-    return llama_model_load_from_file(path_model, params);
-}
-
-struct llama_model * llama_model_load_from_file(
-        const char * path_model,
+static struct llama_model * llama_model_load_from_file_impl(
+        const std::string & path_model,
+        std::vector<std::string> & splits,
         struct llama_model_params params) {
     ggml_time_init();

@@ -9444,47 +9439,6 @@ struct llama_model * llama_model_load_from_file(
         };
     }

-    if (params.rpc_servers != nullptr && params.rpc_servers[0] != '\0') {
-        // split the servers set them into model->rpc_servers
-        std::string servers(params.rpc_servers);
-        size_t pos = 0;
-        while ((pos = servers.find(',')) != std::string::npos) {
-            std::string server = servers.substr(0, pos);
-            model->rpc_servers.push_back(server);
-            servers.erase(0, pos + 1);
-        }
-        model->rpc_servers.push_back(servers);
-    }
-
-    // add RPC devices
-    if (!model->rpc_servers.empty()) {
-        ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC");
-        if (!rpc_reg) {
-            LLAMA_LOG_ERROR("%s: failed to find RPC backend\n", __func__);
-            llama_model_free(model);
-            return nullptr;
-        }
-
-        typedef ggml_backend_dev_t (*ggml_backend_rpc_add_device_t)(const char * endpoint);
-        ggml_backend_rpc_add_device_t ggml_backend_rpc_add_device_fn = (ggml_backend_rpc_add_device_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_device");
-        if (!ggml_backend_rpc_add_device_fn) {
-            LLAMA_LOG_ERROR("%s: failed to find RPC device add function\n", __func__);
-            llama_model_free(model);
-            return nullptr;
-        }
-
-        for (const std::string & server : model->rpc_servers) {
-            ggml_backend_dev_t dev = ggml_backend_rpc_add_device_fn(server.c_str());
-            if (dev) {
-                model->devices.push_back(dev);
-            } else {
-                LLAMA_LOG_ERROR("%s: failed to add RPC device for server '%s'\n", __func__, server.c_str());
-                llama_model_free(model);
-                return nullptr;
-            }
-        }
-    }
-
     // create list of devices to use with this model
     if (params.devices) {
         for (ggml_backend_dev_t * dev = params.devices; *dev; ++dev) {
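With `rpc_servers` removed from `llama_model_params`, RPC endpoints are no longer parsed inside the library; the surviving code only walks `params.devices`. The sketch below is an assumption about how a caller could keep the old behaviour: it mirrors the deleted registration logic and hands the resulting devices in through `params.devices` (a NULL-terminated array). The helper name `make_rpc_devices` and the endpoint are made up for illustration.

```cpp
// Sketch only: register RPC devices on the caller side and pass them via params.devices.
#include <string>
#include <vector>
#include "llama.h"
#include "ggml-backend.h"

static std::vector<ggml_backend_dev_t> make_rpc_devices(const std::vector<std::string> & endpoints) {
    std::vector<ggml_backend_dev_t> devices;
    ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC");
    if (!rpc_reg) {
        return devices; // RPC backend not built in
    }
    typedef ggml_backend_dev_t (*ggml_backend_rpc_add_device_t)(const char * endpoint);
    auto add_device = (ggml_backend_rpc_add_device_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_device");
    if (!add_device) {
        return devices; // proc address not found
    }
    for (const auto & ep : endpoints) {
        if (ggml_backend_dev_t dev = add_device(ep.c_str())) {
            devices.push_back(dev);
        }
    }
    return devices;
}

// usage (hypothetical endpoint):
//   auto devs = make_rpc_devices({"127.0.0.1:50052"});
//   devs.push_back(nullptr);                      // params.devices is NULL-terminated
//   llama_model_params mp = llama_model_default_params();
//   mp.devices = devs.data();
//   llama_model * model = llama_model_load_from_file("model.gguf", mp);
```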
@@ -9525,7 +9479,7 @@ struct llama_model * llama_model_load_from_file(
         LLAMA_LOG_INFO("%s: using device %s (%s) - %zu MiB free\n", __func__, ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), free/1024/1024);
     }

-    const int status = llama_model_load(path_model, *model, params);
+    const int status = llama_model_load(path_model, splits, *model, params);
     GGML_ASSERT(status <= 0);
     if (status < 0) {
         if (status == -1) {
@@ -9541,6 +9495,35 @@ struct llama_model * llama_model_load_from_file(
     return model;
 }

+// deprecated
+struct llama_model * llama_load_model_from_file(
+        const char * path_model,
+        struct llama_model_params params) {
+    return llama_model_load_from_file(path_model, params);
+}
+
+struct llama_model * llama_model_load_from_file(
+        const char * path_model,
+        struct llama_model_params params) {
+    std::vector<std::string> splits = {};
+    return llama_model_load_from_file_impl(path_model, splits, params);
+}
+
+struct llama_model * llama_model_load_from_splits(
+        const char ** paths,
+        size_t n_paths,
+        struct llama_model_params params) {
+    std::vector<std::string> splits;
+    if (n_paths == 0) {
+        LLAMA_LOG_ERROR("%s: list of splits is empty\n", __func__);
+        return nullptr;
+    }
+    for (size_t i = 0; i < n_paths; ++i) {
+        splits.push_back(paths[i]);
+    }
+    return llama_model_load_from_file_impl(splits.front(), splits, params);
+}
+
 struct llama_context * llama_init_from_model(
         struct llama_model * model,
         struct llama_context_params params) {
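Taken together with the header change earlier in this diff, there are now two public routes: `llama_model_load_from_file` still works when shards follow the standard `-NNNNN-of-MMMMM.gguf` scheme (the loader expands the list itself), `llama_model_load_from_splits` is only needed for custom shard names, and `llama_load_model_from_file` survives purely as a deprecated alias. A sketch with hypothetical file names:

```cpp
// Sketch: the two loading routes after this change. llama_load_model_from_file is
// now just a deprecated alias of llama_model_load_from_file.
#include "llama.h"

int main() {
    struct llama_model_params mp = llama_model_default_params();

    // standard naming scheme: pass the first shard, the rest are discovered automatically
    struct llama_model * a = llama_model_load_from_file("my-model-00001-of-00004.gguf", mp);

    // custom shard names: list every path explicitly, first split first
    const char * paths[] = { "part-a.gguf", "part-b.gguf", "part-c.gguf", "part-d.gguf" };
    struct llama_model * b = llama_model_load_from_splits(paths, 4, mp);

    if (a) llama_model_free(a);
    if (b) llama_model_free(b);
    return 0;
}
```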