mirror of https://github.com/LostRuins/koboldcpp.git, synced 2025-09-11 17:44:38 +00:00
remove KEY_USE_GLU_MLP, KEY_USE_RMS_NORM

This commit is contained in:
parent caa7e57ec5
commit f69e9fa04d
2 changed files with 29 additions and 133 deletions
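
Note: the diff below drops the clip.use_glu_mlp / clip.use_rms_norm GGUF flags. The Qwen2.5-VL graph builder now always applies RMS norm for its pre/post layer norms (ggml_rms_norm followed by a weight multiply), while the legacy builder always uses standard layer norm (ggml_norm followed by weight multiply and bias add) and the plain, non-gated MLP. As background, the two norm flavours differ only in whether a mean is subtracted and a bias added. A minimal standalone sketch of that arithmetic follows (plain C++ with made-up values; this is an illustration of the math, not ggml's implementation):

// Sketch of the two normalization flavours referenced in the diff:
// RMS norm (what ggml_rms_norm computes, followed by a weight multiply) and
// standard layer norm (ggml_norm, followed by weight multiply and bias add).
// Values and epsilon are illustrative only.
#include <cmath>
#include <cstdio>
#include <vector>

static std::vector<float> rms_norm(const std::vector<float> & x, const std::vector<float> & w, float eps) {
    float ss = 0.0f;
    for (float v : x) ss += v * v;
    const float scale = 1.0f / std::sqrt(ss / x.size() + eps);
    std::vector<float> y(x.size());
    for (size_t i = 0; i < x.size(); ++i) y[i] = x[i] * scale * w[i]; // scale only, no bias
    return y;
}

static std::vector<float> layer_norm(const std::vector<float> & x, const std::vector<float> & w, const std::vector<float> & b, float eps) {
    float mean = 0.0f;
    for (float v : x) mean += v;
    mean /= x.size();
    float var = 0.0f;
    for (float v : x) var += (v - mean) * (v - mean);
    var /= x.size();
    const float scale = 1.0f / std::sqrt(var + eps);
    std::vector<float> y(x.size());
    for (size_t i = 0; i < x.size(); ++i) y[i] = (x[i] - mean) * scale * w[i] + b[i]; // scale and shift
    return y;
}

int main() {
    const std::vector<float> x = {0.5f, -1.0f, 2.0f, 0.0f};
    const std::vector<float> w = {1.0f, 1.0f, 1.0f, 1.0f};
    const std::vector<float> b = {0.0f, 0.0f, 0.0f, 0.0f};
    for (float v : rms_norm(x, w, 1e-6f))      std::printf("rms   %f\n", v);
    for (float v : layer_norm(x, w, b, 1e-6f)) std::printf("layer %f\n", v);
    return 0;
}
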
@@ -330,8 +330,6 @@ struct clip_ctx {
     float image_std[3];
     bool use_gelu = false;
     bool use_silu = false;
-    bool use_glu_mlp = false;
-    bool use_rms_norm = false;
     int32_t ftype = 1;

     gguf_context_ptr ctx_gguf;
@@ -847,7 +845,6 @@ static ggml_cgraph * clip_image_build_graph_qwen2_5_vl(clip_ctx * ctx, const cli
         inp = ggml_add(ctx0, inp, model.patch_bias);
     }
     struct ggml_tensor * embeddings = inp;
-    struct ggml_tensor * pos_embed = nullptr;
     struct ggml_tensor * window_mask = nullptr;
     struct ggml_tensor * window_idx = nullptr;
     struct ggml_tensor * inv_window_idx = nullptr;
@@ -858,17 +855,10 @@ static ggml_cgraph * clip_image_build_graph_qwen2_5_vl(clip_ctx * ctx, const cli

     // pre-layernorm
     if (model.pre_ln_w) {
-        if (ctx->use_rms_norm) {
-            embeddings = ggml_rms_norm(ctx0, embeddings, eps);
-            ggml_set_name(embeddings, "pre_ln");
-
-            embeddings = ggml_mul(ctx0, embeddings, model.pre_ln_w);
-        } else {
-            embeddings = ggml_norm(ctx0, embeddings, eps);
-            ggml_set_name(embeddings, "pre_ln");
-
-            embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.pre_ln_w), model.pre_ln_b);
-        }
+        embeddings = ggml_rms_norm(ctx0, embeddings, eps);
+        ggml_set_name(embeddings, "pre_ln");
+
+        embeddings = ggml_mul(ctx0, embeddings, model.pre_ln_w);
     }

     std::vector<struct ggml_tensor *> embedding_stack;
@@ -991,17 +981,10 @@ static ggml_cgraph * clip_image_build_graph_qwen2_5_vl(clip_ctx * ctx, const cli

     // post-layernorm
     if (model.post_ln_w) {
-        if (ctx->use_rms_norm) {
-            embeddings = ggml_rms_norm(ctx0, embeddings, eps);
-            ggml_set_name(embeddings, "post_ln");
-
-            embeddings = ggml_mul(ctx0, embeddings, model.post_ln_w);
-        } else {
-            embeddings = ggml_norm(ctx0, embeddings, eps);
-            ggml_set_name(embeddings, "post_ln");
-
-            embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b);
-        }
+        embeddings = ggml_rms_norm(ctx0, embeddings, eps);
+        ggml_set_name(embeddings, "post_ln");
+
+        embeddings = ggml_mul(ctx0, embeddings, model.post_ln_w);
     }

     // final layer is a vision feature layer
@@ -1086,7 +1069,6 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
     const int n_head = hparams.n_head;
     const int d_head = hidden_size / n_head;
     const float eps = hparams.eps;
-    const bool use_window_attn = hparams.full_attn_layers.size() > 0;
     int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4};

     const int batch_size = imgs.entries.size();
@@ -1118,7 +1100,6 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im

         auto inp_1 = ggml_conv_2d(ctx0, model.patch_embeddings_1, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
         inp = ggml_add(ctx0, inp, inp_1);
-
         inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 2, 0, 3)); // [w, h, c, b] -> [c, w, h, b]
         inp = ggml_reshape_4d(
             ctx0, inp,
@@ -1140,11 +1121,8 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
         // inp = ggml_add(ctx0, inp, ggml_repeat(ctx0, model.patch_bias, inp));
         inp = ggml_add(ctx0, inp, model.patch_bias);
     }
     struct ggml_tensor * embeddings = inp;
     struct ggml_tensor * pos_embed = nullptr;
-    struct ggml_tensor * window_mask = nullptr;
-    struct ggml_tensor * window_idx = nullptr;
-    struct ggml_tensor * inv_window_idx = nullptr;

     if (ctx->has_llava_projector) {
         // concat class_embeddings and patch_embeddings
@@ -1186,40 +1164,16 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im

     // pre-layernorm
     if (model.pre_ln_w) {
-        if (ctx->use_rms_norm) {
-            embeddings = ggml_rms_norm(ctx0, embeddings, eps);
-            ggml_set_name(embeddings, "pre_ln");
-
-            embeddings = ggml_mul(ctx0, embeddings, model.pre_ln_w);
-        } else {
-            embeddings = ggml_norm(ctx0, embeddings, eps);
-            ggml_set_name(embeddings, "pre_ln");
-
-            embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.pre_ln_w), model.pre_ln_b);
-        }
+        embeddings = ggml_norm(ctx0, embeddings, eps);
+        ggml_set_name(embeddings, "pre_ln");
+
+        embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.pre_ln_w), model.pre_ln_b);
     }

     std::vector<struct ggml_tensor *> embedding_stack;
     const auto & vision_feature_layer = hparams.vision_feature_layer;

     // loop over layers
-
-    if (use_window_attn) {
-        inv_window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions / 4);
-        ggml_set_name(inv_window_idx, "inv_window_idx");
-        ggml_set_input(inv_window_idx);
-        // mask for window attention
-        window_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, num_positions, num_positions);
-        ggml_set_name(window_mask, "window_mask");
-        ggml_set_input(window_mask);
-
-        // embeddings shape: [hidden_size, patches_w * patches_h, batch_size]
-        GGML_ASSERT(batch_size == 1);
-        embeddings = ggml_reshape_2d(ctx0, embeddings, hidden_size * 4, patches_w * patches_h * batch_size / 4);
-        embeddings = ggml_get_rows(ctx0, embeddings, inv_window_idx);
-        embeddings = ggml_reshape_3d(ctx0, embeddings, hidden_size, patches_w * patches_h, batch_size);
-    }
-
     for (int il = 0; il < ctx->max_feature_layer; il++) {
         struct ggml_tensor * cur = embeddings; // embeddings = residual, cur = hidden_states

@@ -1232,12 +1186,9 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
         //const size_t nb_q_w = model.layers[il].q_w->nb[0];

         // layernorm1
-        if (ctx->use_rms_norm) {
-            cur = ggml_rms_norm(ctx0, cur, eps);
-            cur = ggml_mul(ctx0, cur, model.layers[il].ln_1_w);
-        }
-        else {
+        {
             cur = ggml_norm(ctx0, cur, eps);
+
             cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_1_w),
                            model.layers[il].ln_1_b);
         }
@@ -1277,14 +1228,7 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
         V = ggml_reshape_3d(ctx0, V, num_positions, d_head, n_head * batch_size);

         struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
-        const bool inlist = std::find(hparams.full_attn_layers.begin(), hparams.full_attn_layers.end(), il) != hparams.full_attn_layers.end();
-        const bool full_attn = use_window_attn ? inlist : true;
-        if (full_attn) {
-            KQ = ggml_soft_max_ext(ctx0, KQ, nullptr, 1.0f / sqrtf((float)d_head), 0.0f);
-        } else {
-            KQ = ggml_soft_max_ext(ctx0, KQ, window_mask, 1.0f / sqrtf((float)d_head), 0.0f);
-        }
-
+        KQ = ggml_soft_max_ext(ctx0, KQ, nullptr, 1.0f / sqrtf((float)d_head), 0.0f);
         struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ);
         KQV = ggml_reshape_4d(ctx0, KQV, d_head, num_positions, n_head, batch_size);
         KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
@@ -1301,50 +1245,25 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
         embeddings = cur; // embeddings = residual, cur = hidden_states

         // layernorm2
-        if (ctx->use_rms_norm) {
-            cur = ggml_rms_norm(ctx0, cur, eps);
-            cur = ggml_mul(ctx0, cur, model.layers[il].ln_2_w);
-        } else {
+        {
             cur = ggml_norm(ctx0, cur, eps);
+
             cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_2_w), model.layers[il].ln_2_b);
         }

-        // mlp
-        if (ctx->use_glu_mlp) {
-            // ffn_up
-            auto cur_up = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur);
-            cur_up = ggml_add(ctx0, cur_up, model.layers[il].ff_o_b);
-
-            auto cur_gate = ggml_mul_mat(ctx0, model.layers[il].ff_g_w, cur);
-            cur_gate = ggml_add(ctx0, cur_gate, model.layers[il].ff_g_b);
-            if (ctx->use_gelu) {
-                cur_gate = ggml_gelu_inplace(ctx0, cur_gate);
-            } else if (ctx->use_silu) {
-                cur_gate = ggml_silu_inplace(ctx0, cur_gate);
-            } else {
-                cur_gate = ggml_gelu_quick_inplace(ctx0, cur_gate);
-            }
-            cur = ggml_mul(ctx0, cur_gate, cur_up);
-
-            // ffn_down
-            cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur);
-            cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b);
-        }
-        else {
-            cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur);
-            cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b);
-
-            if (ctx->use_gelu) {
-                cur = ggml_gelu_inplace(ctx0, cur);
-            } else if (ctx->use_silu) {
-                cur = ggml_silu_inplace(ctx0, cur);
-            } else {
-                cur = ggml_gelu_quick_inplace(ctx0, cur);
-            }
-
-            cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur);
-            cur = ggml_add(ctx0, cur, model.layers[il].ff_o_b);
-        }
+        cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur);
+        cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b);
+
+        if (ctx->use_gelu) {
+            cur = ggml_gelu_inplace(ctx0, cur);
+        } else if (ctx->use_silu) {
+            cur = ggml_silu_inplace(ctx0, cur);
+        } else {
+            cur = ggml_gelu_quick_inplace(ctx0, cur);
+        }
+
+        cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur);
+        cur = ggml_add(ctx0, cur, model.layers[il].ff_o_b);

         // residual 2
         cur = ggml_add(ctx0, embeddings, cur);
@@ -1354,17 +1273,10 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im

     // post-layernorm
     if (model.post_ln_w) {
-        if (ctx->use_rms_norm) {
-            embeddings = ggml_rms_norm(ctx0, embeddings, eps);
-            ggml_set_name(embeddings, "post_ln");
-
-            embeddings = ggml_mul(ctx0, embeddings, model.post_ln_w);
-        } else {
-            embeddings = ggml_norm(ctx0, embeddings, eps);
-            ggml_set_name(embeddings, "post_ln");
-
-            embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b);
-        }
+        embeddings = ggml_norm(ctx0, embeddings, eps);
+        ggml_set_name(embeddings, "post_ln");
+
+        embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b);
     }

     // final layer is a vision feature layer
@@ -1678,18 +1590,6 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
         embeddings = ggml_add(ctx0, embeddings, model.mm_1_b);
     }

-    if (use_window_attn) {
-        window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions / 4);
-        ggml_set_name(window_idx, "window_idx");
-        ggml_set_input(window_idx);
-
-        // embeddings shape: [hidden_size, patches_w * patches_h, batch_size]
-        GGML_ASSERT(batch_size == 1);
-        embeddings = ggml_reshape_2d(ctx0, embeddings, hparams.projection_dim, patches_w * patches_h / 4);
-        embeddings = ggml_get_rows(ctx0, embeddings, window_idx);
-        embeddings = ggml_reshape_3d(ctx0, embeddings, hparams.projection_dim, patches_w * patches_h / 4, batch_size);
-    }
-
     // build the graph
     ggml_build_forward_expand(gf, embeddings);

@@ -1810,8 +1710,6 @@ struct clip_model_loader {

         get_bool(KEY_USE_GELU, ctx_clip.use_gelu, false);
         get_bool(KEY_USE_SILU, ctx_clip.use_silu, false);
-        get_bool(KEY_USE_GLU_MLP, ctx_clip.use_glu_mlp, false);
-        get_bool(KEY_USE_RMS_NORM, ctx_clip.use_rms_norm, false);

         get_u32(string_format(KEY_N_EMBD, "vision"), hparams.hidden_size);
         get_u32(string_format(KEY_N_HEAD, "vision"), hparams.n_head);
@@ -152,8 +152,6 @@ def main(args):
         raise ValueError()

     if args.model_type == "qwen2.5vl":
-        fout.add_bool("clip.use_glu_mlp", True) # gate linear unit MLP layer in vision model
-        fout.add_bool("clip.use_rms_norm", True)
         fout.add_array("clip.vision.fullatt_block_indexes", vcfg.fullatt_block_indexes)
         fout.add_uint32("clip.vision.window_size", vcfg.window_size)
         fout.add_uint32(k(KEY_EMBEDDING_LENGTH, VISION), vcfg.hidden_size)
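
For reference, the gated (GLU) MLP path removed in the legacy builder computed an up projection and a gate projection, applied the activation to the gate, multiplied the two elementwise, and then applied the down projection; the path that remains is the plain up -> activation -> down MLP. A standalone sketch of the two feed-forward shapes follows, using SiLU as the activation for concreteness and toy dimensions/weights with biases omitted (an illustration only, not the ggml graph code or its ff_i_w/ff_g_w/ff_o_w tensors):

// Plain MLP vs gated (GLU) MLP, as scalar math on small vectors.
#include <cmath>
#include <cstdio>
#include <vector>

using Vec = std::vector<float>;
using Mat = std::vector<Vec>; // row-major: rows x cols

static Vec matvec(const Mat & w, const Vec & x) {
    Vec y(w.size(), 0.0f);
    for (size_t i = 0; i < w.size(); ++i)
        for (size_t j = 0; j < x.size(); ++j)
            y[i] += w[i][j] * x[j];
    return y;
}

static float silu(float v) { return v / (1.0f + std::exp(-v)); }

// plain MLP: down(act(up(x)))
static Vec plain_mlp(const Mat & up, const Mat & down, const Vec & x) {
    Vec h = matvec(up, x);
    for (float & v : h) v = silu(v);
    return matvec(down, h);
}

// gated MLP: down(act(gate(x)) * up(x)), elementwise product of gate and up
static Vec gated_mlp(const Mat & up, const Mat & gate, const Mat & down, const Vec & x) {
    Vec h_up   = matvec(up, x);
    Vec h_gate = matvec(gate, x);
    for (size_t i = 0; i < h_gate.size(); ++i) h_gate[i] = silu(h_gate[i]) * h_up[i];
    return matvec(down, h_gate);
}

int main() {
    const Vec x = {1.0f, -0.5f};
    const Mat up   = {{0.5f, 0.1f}, {-0.2f, 0.3f}, {0.7f, 0.7f}};  // 2 -> 3
    const Mat gate = {{0.4f, -0.1f}, {0.2f, 0.2f}, {-0.3f, 0.6f}}; // 2 -> 3
    const Mat down = {{0.1f, 0.2f, 0.3f}, {0.3f, 0.2f, 0.1f}};     // 3 -> 2
    for (float v : plain_mlp(up, down, x))       std::printf("plain %f\n", v);
    for (float v : gated_mlp(up, gate, down, x)) std::printf("gated %f\n", v);
    return 0;
}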