diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index 8f78ffcd8..8f8874086 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -39,7 +39,6 @@
 #include <sstream>
 #include <cinttypes>
 #include <limits>
-#include <numeric>
 
 #if defined(LLAVA_LOG_OFF)
 #   define LOG_INF(...)
@@ -103,8 +102,6 @@ static std::string format(const char * fmt, ...) {
 #define KEY_HAS_QWEN2VL_MERGER "clip.has_qwen2vl_merger"
 #define KEY_USE_GELU "clip.use_gelu"
 #define KEY_USE_SILU "clip.use_silu"
-#define KEY_USE_GLU_MLP "clip.use_glu_mlp"
-#define KEY_USE_RMS_NORM "clip.use_rms_norm"
 #define KEY_N_EMBD "clip.%s.embedding_length"
 #define KEY_N_FF "clip.%s.feed_forward_length"
 #define KEY_N_BLOCK "clip.%s.block_count"
@@ -123,8 +120,6 @@ static std::string format(const char * fmt, ...) {
 #define KEY_MM_PATCH_MERGE_TYPE "clip.vision.mm_patch_merge_type"
 #define KEY_IMAGE_GRID_PINPOINTS "clip.vision.image_grid_pinpoints"
 #define KEY_IMAGE_CROP_RESOLUTION "clip.vision.image_crop_resolution"
-#define KEY_FULLATTN_BLK_IDX "clip.vision.fullatt_block_indexes"
-#define KEY_ATTN_WINDOW_SIZE "clip.vision.window_size"
 
 //
@@ -143,7 +138,6 @@ static std::string format(const char * fmt, ...) {
 #define TN_ATTN_OUTPUT "%s.blk.%d.attn_out.%s"
 #define TN_FFN_DOWN "%s.blk.%d.ffn_down.%s"
 #define TN_FFN_UP "%s.blk.%d.ffn_up.%s"
-#define TN_FFN_GATE "%s.blk.%d.ffn_gate.%s"
 #define TN_LN_1 "%s.blk.%d.ln1.%s"
 #define TN_LN_2 "%s.blk.%d.ln2.%s"
 #define TN_LN_PRE "%s.pre_ln.%s"
@@ -453,8 +447,6 @@ struct clip_hparams {
     std::vector<int32_t> image_grid_pinpoints;
     int32_t image_crop_resolution;
     std::unordered_set<int32_t> vision_feature_layer;
-    int32_t attn_window_size;
-    std::vector<int32_t> full_attn_layers;
 };
 
 struct clip_layer {
@@ -480,9 +472,6 @@ struct clip_layer {
     struct ggml_tensor * ff_o_w;
     struct ggml_tensor * ff_o_b;
 
-    struct ggml_tensor * ff_g_w = NULL;
-    struct ggml_tensor * ff_g_b = NULL;
-
     // layernorm 2
     struct ggml_tensor * ln_2_w;
     struct ggml_tensor * ln_2_b;
@@ -612,8 +601,6 @@ struct clip_ctx {
     float image_std[3];
     bool use_gelu = false;
    bool use_silu = false;
-    bool use_glu_mlp = false;
-    bool use_rms_norm = false;
     int32_t ftype = 1;
 
     bool has_class_embedding = true;
@@ -869,7 +856,6 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
     const int n_head = hparams.n_head;
     const int d_head = hidden_size / n_head;
     const float eps = hparams.eps;
-    const bool use_window_attn = hparams.full_attn_layers.size() > 0;
     int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4};
 
     const int batch_size = imgs->size;
@@ -920,11 +906,8 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
         //    inp = ggml_add(ctx0, inp, ggml_repeat(ctx0, model.patch_bias, inp));
         inp = ggml_add(ctx0, inp, model.patch_bias);
     }
-    struct ggml_tensor * embeddings = inp;
-    struct ggml_tensor * pos_embed = nullptr;
-    struct ggml_tensor * window_mask = nullptr;
-    struct ggml_tensor * window_idx = nullptr;
-    struct ggml_tensor * inv_window_idx = nullptr;
+    struct ggml_tensor * embeddings = inp;
+    struct ggml_tensor * pos_embed = nullptr;
 
     if (ctx->has_llava_projector) {
         // concat class_embeddings and patch_embeddings
@@ -966,41 +949,16 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
 
     // pre-layernorm
     if (ctx->has_pre_norm) {
-        if (ctx->use_rms_norm) {
-            embeddings = ggml_rms_norm(ctx0, embeddings, eps);
-            ggml_set_name(embeddings, "pre_ln");
+        embeddings = ggml_norm(ctx0, embeddings, eps);
+        ggml_set_name(embeddings, "pre_ln");
 
-            embeddings = ggml_mul(ctx0, embeddings, model.pre_ln_w);
-        } else {
-            embeddings = ggml_norm(ctx0, embeddings, eps);
-            ggml_set_name(embeddings, "pre_ln");
-
-            embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.pre_ln_w), model.pre_ln_b);
-        }
+        embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.pre_ln_w), model.pre_ln_b);
     }
 
     std::vector<struct ggml_tensor *> embedding_stack;
     const auto & vision_feature_layer = hparams.vision_feature_layer;
 
     // loop over layers
-
-    if (use_window_attn) {
-        inv_window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions / 4);
-        ggml_set_name(inv_window_idx, "inv_window_idx");
-        ggml_set_input(inv_window_idx);
-        // mask for window attention
-        window_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, num_positions, num_positions);
-        ggml_set_name(window_mask, "window_mask");
-        ggml_set_input(window_mask);
-
-        // embeddings shape: [hidden_size, patches_w * patches_h, batch_size]
-        GGML_ASSERT(batch_size == 1);
-        embeddings = ggml_reshape_2d(ctx0, embeddings, hidden_size * 4, patches_w * patches_h * batch_size / 4);
-        embeddings = ggml_get_rows(ctx0, embeddings, inv_window_idx);
-        embeddings = ggml_reshape_3d(ctx0, embeddings, hidden_size, patches_w * patches_h, batch_size);
-
-    }
-
     for (int il = 0; il < ctx->max_feature_layer; il++) {
         struct ggml_tensor * cur = embeddings; // embeddings = residual, cur = hidden_states
@@ -1013,12 +971,9 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
         //const size_t nb_q_w = model.layers[il].q_w->nb[0];
 
         // layernorm1
-        if (ctx->use_rms_norm) {
-            cur = ggml_rms_norm(ctx0, cur, eps);
-            cur = ggml_mul(ctx0, cur, model.layers[il].ln_1_w);
-        }
-        else {
+        {
             cur = ggml_norm(ctx0, cur, eps);
+
             cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_1_w), model.layers[il].ln_1_b);
         }
@@ -1059,15 +1014,7 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
         V = ggml_reshape_3d(ctx0, V, num_positions, d_head, n_head * batch_size);
 
         struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
-        const bool inlist = std::find(hparams.full_attn_layers.begin(), hparams.full_attn_layers.end(), il) != hparams.full_attn_layers.end();
-        const bool full_attn = use_window_attn ? inlist : true;
-        if (full_attn) {
-            KQ = ggml_soft_max_inplace(ctx0, KQ);
-        } else {
-            KQ = ggml_soft_max_ext(ctx0, KQ, window_mask, 1.0f, 0.0f);
-
-        }
-
+        KQ = ggml_soft_max_inplace(ctx0, KQ);
         struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ);
         KQV = ggml_reshape_4d(ctx0, KQV, d_head, num_positions, n_head, batch_size);
         KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
@@ -1084,50 +1031,25 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
         embeddings = cur; // embeddings = residual, cur = hidden_states
 
         // layernorm2
-        if (ctx->use_rms_norm) {
-            cur = ggml_rms_norm(ctx0, cur, eps);
-            cur = ggml_mul(ctx0, cur, model.layers[il].ln_2_w);
-        } else {
+        {
             cur = ggml_norm(ctx0, cur, eps);
+
             cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_2_w), model.layers[il].ln_2_b);
         }
 
-        // mlp
-        if (ctx->use_glu_mlp) {
-            // ffn_up
-            auto cur_up = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur);
-            cur_up = ggml_add(ctx0, cur_up, model.layers[il].ff_o_b);
+        cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur);
+        cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b);
 
-            auto cur_gate = ggml_mul_mat(ctx0, model.layers[il].ff_g_w, cur);
-            cur_gate = ggml_add(ctx0, cur_gate, model.layers[il].ff_g_b);
-            if (ctx->use_gelu) {
-                cur_gate = ggml_gelu_inplace(ctx0, cur_gate);
-            } else if (ctx->use_silu) {
-                cur_gate = ggml_silu_inplace(ctx0, cur_gate);
-            } else {
-                cur_gate = ggml_gelu_quick_inplace(ctx0, cur_gate);
-            }
-            cur = ggml_mul(ctx0, cur_gate, cur_up);
-
-            // ffn_down
-            cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur);
-            cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b);
+        if (ctx->use_gelu) {
+            cur = ggml_gelu_inplace(ctx0, cur);
+        } else if (ctx->use_silu) {
+            cur = ggml_silu_inplace(ctx0, cur);
+        } else {
+            cur = ggml_gelu_quick_inplace(ctx0, cur);
         }
-        else {
-            cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur);
-            cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b);
-            if (ctx->use_gelu) {
-                cur = ggml_gelu_inplace(ctx0, cur);
-            } else if (ctx->use_silu) {
-                cur = ggml_silu_inplace(ctx0, cur);
-            } else {
-                cur = ggml_gelu_quick_inplace(ctx0, cur);
-            }
-
-            cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur);
-            cur = ggml_add(ctx0, cur, model.layers[il].ff_o_b);
-        }
+        cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur);
+        cur = ggml_add(ctx0, cur, model.layers[il].ff_o_b);
 
         // residual 2
         cur = ggml_add(ctx0, embeddings, cur);
@@ -1137,17 +1059,10 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
 
     // post-layernorm
     if (ctx->has_post_norm) {
-        if (ctx->use_rms_norm) {
-            embeddings = ggml_rms_norm(ctx0, embeddings, eps);
-            ggml_set_name(embeddings, "post_ln");
+        embeddings = ggml_norm(ctx0, embeddings, eps);
+        ggml_set_name(embeddings, "post_ln");
 
-            embeddings = ggml_mul(ctx0, embeddings, model.post_ln_w);
-        } else {
-            embeddings = ggml_norm(ctx0, embeddings, eps);
-            ggml_set_name(embeddings, "post_ln");
-
-            embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b);
-        }
+        embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b);
     }
 
     // final layer is a vision feature layer
@@ -1460,18 +1375,6 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
         embeddings = ggml_add(ctx0, embeddings, model.mm_1_b);
     }
 
-    if (use_window_attn) {
-        window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions / 4);
-        ggml_set_name(window_idx, "window_idx");
-        ggml_set_input(window_idx);
-
-        // embeddings shape: [hidden_size, patches_w * patches_h, batch_size]
-        GGML_ASSERT(batch_size == 1);
-        embeddings = ggml_reshape_2d(ctx0, embeddings, hparams.projection_dim, patches_w * patches_h / 4);
-        embeddings = ggml_get_rows(ctx0, embeddings, window_idx);
-        embeddings = ggml_reshape_3d(ctx0, embeddings, hparams.projection_dim, patches_w * patches_h / 4, batch_size);
-    }
-
     // build the graph
     ggml_build_forward_expand(gf, embeddings);
@@ -1668,20 +1571,6 @@ struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_p
         new_clip->use_silu = false;
     }
 
-    try {
-        idx = get_key_idx(ctx, KEY_USE_GLU_MLP);
-        new_clip->use_glu_mlp = gguf_get_val_bool(ctx, idx);
-    } catch (std::runtime_error & /*e*/) {
-        new_clip->use_glu_mlp = false;
-    }
-
-    try {
-        idx = get_key_idx(ctx, KEY_USE_RMS_NORM);
-        new_clip->use_rms_norm = gguf_get_val_bool(ctx, idx);
-    } catch (std::runtime_error & /*e*/) {
-        new_clip->use_rms_norm = false;
-    }
-
     if (verbosity >= 1) {
         LOG_INF("%s: text_encoder: %d\n", __func__, new_clip->has_text_encoder);
         LOG_INF("%s: vision_encoder: %d\n", __func__, new_clip->has_vision_encoder);
@@ -1816,18 +1705,6 @@ struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_p
         const float * mean_data = (const float *)gguf_get_arr_data(ctx, idx_mean);
         const float * std_data = (const float *)gguf_get_arr_data(ctx, idx_std);
 
-        try {
-            int idx_full_attn_layers = get_key_idx(ctx, KEY_FULLATTN_BLK_IDX);
-            auto n_full_attn_layers = gguf_get_arr_n(ctx, idx_full_attn_layers);
-            const int * full_attn_layers = (const int *)gguf_get_arr_data(ctx, idx_full_attn_layers);
-            hparams.full_attn_layers.assign(full_attn_layers, full_attn_layers + n_full_attn_layers);
-
-            int idx_window_size = get_key_idx(ctx, KEY_ATTN_WINDOW_SIZE);
-            hparams.attn_window_size = gguf_get_val_u32(ctx, idx_window_size);
-        } catch (std::runtime_error & /*e*/) {
-            hparams.attn_window_size = 0;
-        }
-
         for (int i = 0; i < 3; ++i) {
             new_clip->image_mean[i] = mean_data[i];
             new_clip->image_std[i] = std_data[i];
@@ -1878,15 +1755,8 @@ struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_p
         }
 
         try {
+            vision_model.post_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "weight"));
             vision_model.post_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "bias"));
-            vision_model.post_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "weight"));
-            new_clip->has_post_norm = true;
-        } catch (std::exception & /*e*/) {
-            new_clip->has_post_norm = false;
-        }
-        try {
-            // in case of rms norm, there will be only ln weight
-            vision_model.post_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "weight"));
             new_clip->has_post_norm = true;
         } catch (std::exception & /*e*/) {
             new_clip->has_post_norm = false;
@@ -2046,17 +1916,10 @@ struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_p
             layer.q_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_Q, "v", il, "bias"));
             layer.v_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_V, "v", il, "bias"));
             layer.o_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_OUTPUT, "v", il, "bias"));
+            layer.ln_1_b = get_tensor(new_clip->ctx_data, format(TN_LN_1, "v", il, "bias"));
+            layer.ln_2_b = get_tensor(new_clip->ctx_data, format(TN_LN_2, "v", il, "bias"));
             layer.ff_i_b = get_tensor(new_clip->ctx_data, format(TN_FFN_DOWN, "v", il, "bias"));
             layer.ff_o_b = get_tensor(new_clip->ctx_data, format(TN_FFN_UP, "v", il, "bias"));
-
-            if (!new_clip->use_rms_norm) {
-                layer.ln_1_b = get_tensor(new_clip->ctx_data, format(TN_LN_1, "v", il, "bias"));
-                layer.ln_2_b = get_tensor(new_clip->ctx_data, format(TN_LN_2, "v", il, "bias"));
-            }
-            if (new_clip->use_glu_mlp) {
-                layer.ff_g_w = get_tensor(new_clip->ctx_data, format(TN_FFN_GATE, "v", il, "weight"));
-                layer.ff_g_b = get_tensor(new_clip->ctx_data, format(TN_FFN_GATE, "v", il, "bias"));
-            }
         }
     }
@@ -3163,96 +3026,30 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
     }
 
     if (ctx->has_qwen2vl_merger) {
-        /*
-            pw * ph = number of tokens output by the ViT after applying the patch merger
-            ipw * iph = number of vision tokens processed inside the ViT
-        */
-        const int merge_ratio = 2;
-        const int pw = image_size_width / patch_size / merge_ratio;
-        const int ph = image_size_height / patch_size / merge_ratio;
-        const int ipw = image_size_width / patch_size;
-        const int iph = image_size_height / patch_size;
-
-        std::vector<int> idx(ph * pw);
-        std::vector<int> inv_idx(ph * pw);
-
-        if (hparams.attn_window_size > 0) {
-            struct ggml_tensor * window_idx = ggml_graph_get_tensor(gf, "window_idx");
-            struct ggml_tensor * inv_window_idx = ggml_graph_get_tensor(gf, "inv_window_idx");
-            struct ggml_tensor * window_mask = ggml_graph_get_tensor(gf, "window_mask");
-
-            const int grid_window = hparams.attn_window_size / patch_size / merge_ratio;
-            int dst = 0;
-            // [num_vision_tokens, num_vision_tokens] attention mask tensor
-            std::vector<float> mask(pow(ipw * iph, 2), std::numeric_limits<float>::lowest());
-            int mask_row = 0;
-
-            for (int y = 0; y < ph; y+=grid_window)
-            {
-                for (int x = 0; x < pw; x+=grid_window)
-                {
-                    const int win_h = std::min(grid_window, ph - y);
-                    const int win_w = std::min(grid_window, pw - x);
-                    const int dst_0 = dst;
-                    // group all tokens belonging to the same window together (into a contiguous range)
-                    for (int dy = 0; dy < win_h; dy++) {
-                        for (int dx = 0; dx < win_w; dx++) {
-                            const int src = (y + dy) * pw + (x + dx);
-                            assert(src < (int)idx.size());
-                            assert(dst < (int)inv_idx.size());
-                            idx[src] = dst;
-                            inv_idx[dst] = src;
-                            dst++;
-                        }
-                    }
-
-                    for (int r=0; r < win_h * win_w * merge_ratio * merge_ratio; r++) {
-                        int row_offset = mask_row * (ipw * iph);
-                        std::fill(
-                            mask.begin() + row_offset + (dst_0 * merge_ratio * merge_ratio),
-                            mask.begin() + row_offset + (dst * merge_ratio * merge_ratio),
-                            0.0);
-                        mask_row++;
-                    }
-                }
-            }
-
-            if (window_idx) ggml_backend_tensor_set(window_idx, idx.data(), 0, ggml_nbytes(window_idx));
-            if (inv_window_idx) ggml_backend_tensor_set(inv_window_idx, inv_idx.data(), 0, ggml_nbytes(inv_window_idx));
-            if (window_mask) ggml_backend_tensor_set(window_mask, mask.data(), 0, ggml_nbytes(window_mask));
-        } else {
-            std::iota(idx.begin(), idx.end(), 0);
-            std::iota(inv_idx.begin(), inv_idx.end(), 0);
-        }
-
         struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions");
-        // const int pw = image_size_width / patch_size;
-        // const int ph = image_size_height / patch_size;
-        const int mpow = (merge_ratio * merge_ratio);
+        const int pw = image_size_width / patch_size;
+        const int ph = image_size_height / patch_size;
         int* positions_data = (int*)malloc(ggml_nbytes(positions));
 
         int ptr = 0;
-        for (int y = 0; y < iph; y+=merge_ratio)
+        for (int y = 0; y < ph; y+=2)
         {
-            for (int x = 0; x < ipw; x+=merge_ratio)
+            for (int x = 0; x < pw; x+=2)
             {
                 for (int dy = 0; dy < 2; dy++) {
                     for (int dx = 0; dx < 2; dx++) {
-                        auto remap = idx[ptr / mpow];
-                        remap = remap * mpow + (ptr % mpow);
-
-                        positions_data[remap] = y + dy;
-                        positions_data[num_patches + remap] = x + dx;
-                        positions_data[num_patches * 2 + remap] = y + dy;
-                        positions_data[num_patches * 3 + remap] = x + dx;
+                        positions_data[ptr] = y + dy;
+                        positions_data[num_patches + ptr] = x + dx;
+                        positions_data[num_patches * 2 + ptr] = y + dy;
+                        positions_data[num_patches * 3 + ptr] = x + dx;
                         ptr++;
                     }
                 }
             }
         }
 
-        if (positions) ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions));
+        ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions));
         free(positions_data);
     } else if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) {
@@ -3284,65 +3081,6 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
         }
     }
 
-    if (hparams.attn_window_size > 0 && ctx->has_qwen2vl_merger) { // TODO: add use_window_attn?
-        struct ggml_tensor * window_idx = ggml_graph_get_tensor(gf, "window_idx");
-        struct ggml_tensor * inv_window_idx = ggml_graph_get_tensor(gf, "inv_window_idx");
-        struct ggml_tensor * window_mask = ggml_graph_get_tensor(gf, "window_mask");
-
-        const int merge_ratio = 2;
-        const int pw = image_size_width / patch_size / merge_ratio;
-        const int ph = image_size_height / patch_size / merge_ratio;
-        const int grid_window = hparams.attn_window_size / patch_size / merge_ratio;
-        const int ipw = image_size_width / patch_size;
-        const int iph = image_size_height / patch_size;
-        /*
-            pw * ph = number of tokens output by the ViT after applying the patch merger
-            ipw * iph = number of vision tokens processed inside the ViT
-        */
-
-        std::vector<int> idx(ph * pw);
-        std::vector<int> inv_idx(ph * pw);
-        int dst = 0;
-        // [num_vision_tokens, num_vision_tokens] attention mask tensor
-        std::vector<float> mask(pow(ipw * iph, 2), std::numeric_limits<float>::lowest());
-        int mask_row = 0;
-
-        for (int y = 0; y < ph; y+=grid_window)
-        {
-            for (int x = 0; x < pw; x+=grid_window)
-            {
-                const int win_h = std::min(grid_window, ph - y);
-                const int win_w = std::min(grid_window, pw - x);
-                const int dst_0 = dst;
-                // group all tokens belonging to the same window together (into a contiguous range)
-                for (int dy = 0; dy < win_h; dy++) {
-                    for (int dx = 0; dx < win_w; dx++) {
-                        const int src = (y + dy) * pw + (x + dx);
-                        assert(src < (int)idx.size());
-                        assert(dst < (int)inv_idx.size());
-                        idx[src] = dst;
-                        inv_idx[dst] = src;
-                        dst++;
-                    }
-                }
-
-                for (int r=0; r < win_h * win_w * merge_ratio * merge_ratio; r++) {
-                    int row_offset = mask_row * (ipw * iph);
-                    std::fill(
-                        mask.begin() + row_offset + (dst_0 * merge_ratio * merge_ratio),
-                        mask.begin() + row_offset + (dst * merge_ratio * merge_ratio),
-                        0.0);
-                    mask_row++;
-                }
-            }
-        }
-
-
-        if (window_idx) ggml_backend_tensor_set(window_idx, idx.data(), 0, ggml_nbytes(window_idx));
-        if (inv_window_idx) ggml_backend_tensor_set(inv_window_idx, inv_idx.data(), 0, ggml_nbytes(inv_window_idx));
-        if (window_mask) ggml_backend_tensor_set(window_mask, mask.data(), 0, ggml_nbytes(window_mask));
-    }
-
     if (ggml_backend_is_cpu(ctx->backend)) {
         ggml_backend_cpu_set_n_threads(ctx->backend, n_threads);
     }
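For reference, this is the shape of the window-attention plumbing the clip.cpp hunks above remove: tokens are permuted so that every attention window occupies a contiguous index range (inv_window_idx before the blocks, window_idx to undo the permutation afterwards), and an additive mask keeps each token's attention inside its own window on all but the full-attention layers. Below is a minimal NumPy sketch of that index/mask construction, mirroring the removed C++ loops; build_window_indices is a hypothetical helper name, and it assumes merge_ratio = 2 and a single image, as the removed code did.

import numpy as np

def build_window_indices(ph: int, pw: int, grid_window: int, merge_ratio: int = 2):
    # ph, pw: token grid after the 2x2 patch merger;
    # grid_window = attn_window_size / patch_size / merge_ratio
    idx     = np.empty(ph * pw, dtype=np.int32)   # source position -> windowed position
    inv_idx = np.empty(ph * pw, dtype=np.int32)   # windowed position -> source position
    m2      = merge_ratio * merge_ratio
    n_tok   = ph * pw * m2                        # tokens actually seen inside the ViT
    mask    = np.full((n_tok, n_tok), np.finfo(np.float32).min, dtype=np.float32)

    dst = row = 0
    for y in range(0, ph, grid_window):
        for x in range(0, pw, grid_window):
            win_h = min(grid_window, ph - y)
            win_w = min(grid_window, pw - x)
            dst_0 = dst
            # group all tokens of one window into a contiguous destination range
            for dy in range(win_h):
                for dx in range(win_w):
                    src = (y + dy) * pw + (x + dx)
                    idx[src] = dst
                    inv_idx[dst] = src
                    dst += 1
            # unmask the diagonal block this window occupies
            for _ in range(win_h * win_w * m2):
                mask[row, dst_0 * m2 : dst * m2] = 0.0
                row += 1
    return idx, inv_idx, mask

With the Qwen2.5-VL defaults (window_size 112, patch_size 14, merge_ratio 2) this gives grid_window = 4, i.e. each 4x4 block of merged tokens attends only within itself, except on the layers listed under clip.vision.fullatt_block_indexes.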
diff --git a/examples/llava/qwen2_vl_surgery.py b/examples/llava/qwen2_vl_surgery.py
index 8f7a94e5c..c87606b4f 100644
--- a/examples/llava/qwen2_vl_surgery.py
+++ b/examples/llava/qwen2_vl_surgery.py
@@ -5,12 +5,10 @@
 import torch
 import numpy as np
 from gguf import *
 from transformers import (
-    AutoProcessor,
     Qwen2VLForConditionalGeneration,
-    Qwen2_5_VLForConditionalGeneration,
     Qwen2VLProcessor,
-    Qwen2VLConfig,
-    Qwen2_5_VLConfig,
+    AutoProcessor,
+    Qwen2VLConfig
 )
 
@@ -20,80 +18,62 @@ VISION = "clip.vision"
 def k(raw_key: str, arch: str) -> str:
     return raw_key.format(arch=arch)
 
-class VL2:
-    @staticmethod
-    def to_gguf_name(name: str) -> str:
-        og = name
-        name = name.replace("text_model", "t").replace("vision_model", "v")
-        name = name.replace("blocks", "blk").replace("embeddings.", "")
-        name = name.replace("attn.", "attn_")
-        name = name.replace("mlp.fc1", "ffn_down").replace("mlp.fc2", "ffn_up").replace("proj.", "out.")
-        # name = name.replace("layrnorm", "ln").replace("layer_norm", "ln").replace("layernorm", "ln")
-        name = name.replace("norm1", "ln1").replace("norm2", "ln2")
-        name = name.replace("merger.mlp", 'mm')
-        print(f"[to_gguf_name] {og} --> {name}")
-        return name
+def to_gguf_name(name: str) -> str:
+    og = name
+    name = name.replace("text_model", "t").replace("vision_model", "v")
+    name = name.replace("blocks", "blk").replace("embeddings.", "")
+    name = name.replace("attn.", "attn_")
+    name = name.replace("mlp.fc1", "ffn_down").replace("mlp.fc2", "ffn_up").replace("proj.", "out.")
+    # name = name.replace("layrnorm", "ln").replace("layer_norm", "ln").replace("layernorm", "ln")
+    name = name.replace("norm1", "ln1").replace("norm2", "ln2")
+    name = name.replace("merger.mlp", 'mm')
+    print(f"[to_gguf_name] {og} --> {name}")
+    return name
 
-    @classmethod
-    def find_vision_tensors(cls, qwen2vl, dtype) -> Dict[str, np.ndarray]:
-        vision_model = qwen2vl.visual
-        tensor_map = {}
-        for name, ten in vision_model.state_dict().items():
-            ten = ten.numpy()
-            if 'qkv' in name:
-                if ten.ndim == 2: # weight
-                    c3, _ = ten.shape
-                else: # bias
-                    c3 = ten.shape[0]
-                assert c3 % 3 == 0
-                c = c3 // 3
-                wq = ten[:c]
-                wk = ten[c: c * 2]
-                wv = ten[c * 2:]
-                tensor_map[cls.to_gguf_name(f"vision_model.{name}").replace("qkv", "q")] = wq
-                tensor_map[cls.to_gguf_name(f"vision_model.{name}").replace("qkv", "k")] = wk
-                tensor_map[cls.to_gguf_name(f"vision_model.{name}").replace("qkv", "v")] = wv
-            elif 'merger' in name:
-                if name.endswith("ln_q.weight"):
-                    tensor_map['v.post_ln.weight'] = ten
-                elif name.endswith("ln_q.bias"):
-                    tensor_map['v.post_ln.bias'] = ten
-                else:
-                    # "merger.mlp.%d.weight/bias" --> "mm.%d.weight/bias"
-                    tensor_map[cls.to_gguf_name(name)] = ten
-            elif 'patch_embed.proj.weight' in name:
-                # NOTE: split Conv3D into Conv2Ds
-                c1, c2, kt, kh, kw = ten.shape
-                assert kt == 2, "Current implementation only supports temporal_patch_size of 2"
-                tensor_map["v.patch_embd.weight"] = ten[:, :, 0, ...]
-                tensor_map["v.patch_embd.weight.1"] = ten[:, :, 1, ...]
+
+def find_vision_tensors(qwen2vl, dtype) -> Dict[str, np.ndarray]:
+    vision_model = qwen2vl.visual
+    tensor_map = {}
+    for name, ten in vision_model.state_dict().items():
+        ten = ten.numpy()
+        if 'qkv' in name:
+            if ten.ndim == 2: # weight
+                c3, _ = ten.shape
+            else: # bias
+                c3 = ten.shape[0]
+            assert c3 % 3 == 0
+            c = c3 // 3
+            wq = ten[:c]
+            wk = ten[c: c * 2]
+            wv = ten[c * 2:]
+            tensor_map[to_gguf_name(f"vision_model.{name}").replace("qkv", "q")] = wq
+            tensor_map[to_gguf_name(f"vision_model.{name}").replace("qkv", "k")] = wk
+            tensor_map[to_gguf_name(f"vision_model.{name}").replace("qkv", "v")] = wv
+        elif 'merger' in name:
+            if name.endswith("ln_q.weight"):
+                tensor_map['v.post_ln.weight'] = ten
+            elif name.endswith("ln_q.bias"):
+                tensor_map['v.post_ln.bias'] = ten
             else:
-                tensor_map[cls.to_gguf_name(f"vision_model.{name}")] = ten
+                # "merger.mlp.%d.weight/bias" --> "mm.%d.weight/bias"
+                tensor_map[to_gguf_name(name)] = ten
+        elif 'patch_embed.proj.weight' in name:
+            # NOTE: split Conv3D into Conv2Ds
+            c1, c2, kt, kh, kw = ten.shape
+            assert kt == 2, "Current implementation only supports temporal_patch_size of 2"
+            tensor_map["v.patch_embd.weight"] = ten[:, :, 0, ...]
+            tensor_map["v.patch_embd.weight.1"] = ten[:, :, 1, ...]
+        else:
+            tensor_map[to_gguf_name(f"vision_model.{name}")] = ten
 
-        for new_name, ten in tensor_map.items():
-            if ten.ndim <= 1 or new_name.endswith("_norm.weight"):
-                tensor_map[new_name] = ten.astype(np.float32)
-            else:
-                tensor_map[new_name] = ten.astype(dtype)
-        tensor_map["v.position_embd.weight"] = np.zeros([10, 10], dtype=np.float32) # dummy tensor, just here as a placeholder
-        return tensor_map
-
-
-class VL25(VL2):
-
-    @staticmethod
-    def to_gguf_name(name: str) -> str:
-        og = name
-        name = name.replace("text_model", "t").replace("vision_model", "v")
-        name = name.replace("blocks", "blk").replace("embeddings.", "")
-        name = name.replace("attn.", "attn_")
-        name = name.replace("mlp.down_proj", "ffn_down").replace("mlp.up_proj", "ffn_up")
-        name = name.replace("mlp.gate_proj", "ffn_gate").replace("proj.", "out.")
-        name = name.replace("norm1", "ln1").replace("norm2", "ln2")
-        name = name.replace("merger.mlp", 'mm')
-        print(f"[vl25][to_gguf_name] {og} --> {name}")
-        return name
+    for new_name, ten in tensor_map.items():
+        if ten.ndim <= 1 or new_name.endswith("_norm.weight"):
+            tensor_map[new_name] = ten.astype(np.float32)
+        else:
+            tensor_map[new_name] = ten.astype(dtype)
+    tensor_map["v.position_embd.weight"] = np.zeros([10, 10], dtype=np.float32) # dummy tensor, just here as a placeholder
+    return tensor_map
 
 
 def main(args):
@@ -112,18 +92,11 @@ def main(args):
     model_path = ""
     model_name = args.model_name
     print("model_name: ", model_name)
-    if args.model_type == "qwen2vl":
-        qwen2vl = Qwen2VLForConditionalGeneration.from_pretrained(
-            model_name, torch_dtype=dtype, device_map="cpu"
-        )
-        cfg: Qwen2VLConfig = qwen2vl.config # type: ignore[reportAssignmentType]
-        vcfg = cfg.vision_config
-    else:
-        qwen2vl = Qwen2_5_VLForConditionalGeneration.from_pretrained(
-            model_name, torch_dtype=dtype, device_map="cpu"
-        )
-        cfg: Qwen2_5_VLConfig = qwen2vl.config # type: ignore[reportAssignmentType]
-        vcfg = cfg.vision_config
+    qwen2vl = Qwen2VLForConditionalGeneration.from_pretrained(
+        model_name, torch_dtype=dtype, device_map="cpu"
+    )
+    cfg: Qwen2VLConfig = qwen2vl.config # type: ignore[reportAssignmentType]
+    vcfg = cfg.vision_config
 
     if os.path.isdir(model_name):
         local_model = True
@@ -152,26 +125,14 @@ def main(args):
     else:
         raise ValueError()
 
-    if args.model_type == "qwen2.5vl":
-        fout.add_bool("clip.use_glu_mlp", True) # gated linear unit MLP layer in vision model
-        fout.add_bool("clip.use_rms_norm", True)
-        fout.add_array("clip.vision.fullatt_block_indexes", vcfg.fullatt_block_indexes)
-        fout.add_uint32("clip.vision.window_size", vcfg.window_size)
-        fout.add_uint32(k(KEY_EMBEDDING_LENGTH, VISION), vcfg.hidden_size)
-        fout.add_uint32("clip.vision.projection_dim", vcfg.out_hidden_size)
-    else:
-        fout.add_uint32(k(KEY_EMBEDDING_LENGTH, VISION), vcfg.embed_dim)
-        fout.add_uint32("clip.vision.projection_dim", vcfg.hidden_size)
-
-    if args.model_type == "qwen2.5vl":
-        tensor_map = VL25.find_vision_tensors(qwen2vl, np_dtype)
-    else:
-        tensor_map = VL2.find_vision_tensors(qwen2vl, np_dtype)
+    tensor_map = find_vision_tensors(qwen2vl, np_dtype)
     for name, data in tensor_map.items():
         fout.add_tensor(name, data)
 
     fout.add_uint32("clip.vision.patch_size", vcfg.patch_size)
     fout.add_uint32("clip.vision.image_size", 14 * 40) # some reasonable size that is divisible by (14*2)
+    fout.add_uint32(k(KEY_EMBEDDING_LENGTH, VISION), vcfg.embed_dim)
+    fout.add_uint32("clip.vision.projection_dim", vcfg.hidden_size)
     fout.add_uint32(k(KEY_ATTENTION_HEAD_COUNT, VISION), vcfg.num_heads)
     fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, VISION), 1e-6)
     fout.add_uint32(k(KEY_BLOCK_COUNT, VISION), vcfg.depth)
@@ -199,7 +160,6 @@ def main(args):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument("model_name", nargs='?', default="Qwen/Qwen2-VL-2B-Instruct")
-    parser.add_argument("--model_type", nargs='?', choices=['qwen2vl', 'qwen2.5vl'], default="qwen2vl")
     parser.add_argument("--data_type", nargs='?', choices=['fp32', 'fp16'], default="fp32")
     args = parser.parse_args()
     main(args)
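One detail worth spelling out from find_vision_tensors above: because the patch-embedding Conv3D strides by its full temporal kernel (kt = 2), it factors exactly into one Conv2D per temporal slice whose outputs are summed, which is why the weight is exported as the pair v.patch_embd.weight / v.patch_embd.weight.1. A quick self-contained PyTorch check of that equivalence (the shapes here are illustrative, not the model's real ones):

import torch
import torch.nn.functional as F

c_out, c_in, kt, kh, kw = 8, 3, 2, 14, 14
w = torch.randn(c_out, c_in, kt, kh, kw)   # Conv3D patch-embedding weight
x = torch.randn(1, c_in, kt, 28, 28)       # one temporal pair of frames

y3d = F.conv3d(x, w, stride=(kt, kh, kw))                   # (1, c_out, 1, 2, 2)
y2d = (F.conv2d(x[:, :, 0], w[:, :, 0], stride=(kh, kw)) +  # temporal slice 0
       F.conv2d(x[:, :, 1], w[:, :, 1], stride=(kh, kw)))   # temporal slice 1

print(torch.allclose(y3d.squeeze(2), y2d, atol=1e-5))       # expect: True

After the revert, the script is again a plain Qwen2-VL exporter, invoked e.g. as python examples/llava/qwen2_vl_surgery.py Qwen/Qwen2-VL-2B-Instruct --data_type fp16, with no --model_type flag.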