remove commented-out code blocks

HimariO 2025-04-04 15:44:31 +08:00
parent dde96b4774
commit 223edef897
2 changed files with 1 addition and 76 deletions

View file

@@ -779,9 +779,6 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
KQ = ggml_soft_max_ext(ctx0, KQ, nullptr, 1.0f / sqrtf((float)d_head), 0.0f);
} else {
KQ = ggml_soft_max_ext(ctx0, KQ, window_mask, 1.0f, 0.0f);
// KQ = ggml_scale_inplace(ctx0, KQ, 1.0f / sqrt((float)d_head));
// KQ = ggml_add(ctx0, KQ, window_mask);
// KQ = ggml_soft_max_inplace(ctx0, KQ);
}
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ);
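As background for why the three commented-out lines above can simply be dropped: ggml_soft_max_ext(ctx0, KQ, mask, scale, max_bias) fuses scaling, additive masking, and the row-wise softmax into a single call. Below is a minimal plain-C++ sketch of what that fused op computes (with max_bias = 0), not the ggml implementation; the toy sizes and the helper name soft_max_ext_rows are illustrative assumptions.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Hypothetical helper mirroring the fused computation:
// out[r][c] = softmax over c of (scale * kq[r][c] + mask[r][c])
static void soft_max_ext_rows(std::vector<float> & kq, const std::vector<float> & mask,
                              int rows, int cols, float scale) {
    for (int r = 0; r < rows; r++) {
        float maxv = -INFINITY;
        for (int c = 0; c < cols; c++) {
            float v = scale * kq[r*cols + c] + mask[r*cols + c];   // scale, then add the additive mask
            kq[r*cols + c] = v;
            maxv = std::max(maxv, v);
        }
        float sum = 0.0f;
        for (int c = 0; c < cols; c++) {
            kq[r*cols + c] = std::exp(kq[r*cols + c] - maxv);      // numerically stable softmax
            sum += kq[r*cols + c];
        }
        for (int c = 0; c < cols; c++) {
            kq[r*cols + c] /= sum;
        }
    }
}

int main() {
    const int n = 4;                      // toy 4-token attention matrix
    std::vector<float> kq(n * n, 1.0f);   // dummy attention scores
    std::vector<float> mask(n * n, 0.0f);
    mask[0 * n + 3] = -1e9f;              // mask token 3 out of token 0's window
    soft_max_ext_rows(kq, mask, n, n, 1.0f);
    printf("row 0: %.3f %.3f %.3f %.3f\n", kq[0], kq[1], kq[2], kq[3]);   // ~0.333 0.333 0.333 0.000
    return 0;
}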
@@ -2754,9 +2751,8 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
for (int dx = 0; dx < 2; dx++) {
auto remap = idx[ptr / mpow];
remap = remap * mpow + (ptr % mpow);
// auto remap = ptr;
positions_data[remap] = y + dy;
positions_data[remap] = y + dy;
positions_data[num_patches + remap] = x + dx;
positions_data[num_patches * 2 + remap] = y + dy;
positions_data[num_patches * 3 + remap] = x + dx;
@@ -2851,7 +2847,6 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
}
}
if (window_idx) ggml_backend_tensor_set(window_idx, idx.data(), 0, ggml_nbytes(window_idx));
if (inv_window_idx) ggml_backend_tensor_set(inv_window_idx, inv_idx.data(), 0, ggml_nbytes(inv_window_idx));
if (window_mask) ggml_backend_tensor_set(window_mask, mask.data(), 0, ggml_nbytes(window_mask));
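For readers following the remap arithmetic kept in the second hunk: idx[] holds the window-ordered slot of each merged token, and ptr appears to count raw (pre-merge) patches, so idx[ptr / mpow] * mpow + ptr % mpow moves a whole merge group to its window-ordered slot while preserving order inside the group. A small standalone sketch; the toy idx permutation and mpow = 4 (i.e. merge_ratio squared) are assumptions for illustration.

#include <cstdio>
#include <vector>

int main() {
    // Toy window ordering of 4 merged tokens: the middle two swap places.
    std::vector<int> idx = {0, 2, 1, 3};   // merged token (row-major) -> window-ordered slot
    const int mpow = 4;                    // assumed merge-group size (merge_ratio * merge_ratio)
    const int n_raw = (int) idx.size() * mpow;

    for (int ptr = 0; ptr < n_raw; ptr++) {
        int remap = idx[ptr / mpow];           // window-ordered slot of this patch's merge group
        remap = remap * mpow + (ptr % mpow);   // keep the patch's offset within its group
        printf("raw patch %2d -> positions_data slot %2d\n", ptr, remap);
    }
    return 0;
}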

View file

@@ -567,76 +567,6 @@ static void debug_dump_img_embed(struct llava_context * ctx_llava, model_output_
}
}
static void dump_win_attn_mask() {
    const int image_size_width = 196;
    const int image_size_height = 140;
    const int patch_size = 14;
    const int attn_window_size = 112;
    const int merge_ratio = 2;
    const int ipw = image_size_width / patch_size;
    const int iph = image_size_height / patch_size;
    const int pw = image_size_width / patch_size / merge_ratio;
    const int ph = image_size_height / patch_size / merge_ratio;
    const int grid_window = attn_window_size / patch_size / merge_ratio;
    /*
        pw * ph   = number of tokens output by the ViT after applying the patch merger
        ipw * iph = number of vision tokens processed inside the ViT
    */
    std::vector<int> idx(ph * pw);
    std::vector<int> inv_idx(ph * pw);
    int dst = 0;
    // [num_vision_tokens, num_vision_tokens] attention mask tensor
    int ne = pow(ipw * iph, 2);
    std::vector<float> mask(ne, std::numeric_limits<float>::lowest());
    int mask_row = 0;
    for (int y = 0; y < ph; y += grid_window)
    {
        for (int x = 0; x < pw; x += grid_window)
        {
            const int win_h = std::min(grid_window, ph - y);
            const int win_w = std::min(grid_window, pw - x);
            const int dst_0 = dst;
            // group all tokens belonging to the same window together (into a contiguous range)
            for (int dy = 0; dy < win_h; dy++) {
                for (int dx = 0; dx < win_w; dx++) {
                    const int src = (y + dy) * pw + (x + dx);
                    assert(src < (int)idx.size());
                    assert(dst < (int)inv_idx.size());
                    idx[src] = dst;
                    inv_idx[dst] = src;
                    dst++;
                }
            }
            for (int r = 0; r < win_h * win_w * merge_ratio * merge_ratio; r++) {
                int row_offset = mask_row * (ipw * iph);
                std::fill(
                    mask.begin() + row_offset + (dst_0 * merge_ratio * merge_ratio),
                    mask.begin() + row_offset + (dst * merge_ratio * merge_ratio),
                    0.0);
                mask_row++;
            }
        }
    }
    auto output_path = "win_attn_mask_fp32.bin";
    std::ofstream outFile(output_path, std::ios::binary);
    if (outFile.is_open()) {
        outFile.write(reinterpret_cast<const char*>(mask.data()), ne * sizeof(float));
        outFile.close();
        std::cout << "Data successfully written to " << output_path << std::endl;
    } else {
        std::cerr << "Error opening file!" << std::endl;
    }
}
#endif
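The removed dump_win_attn_mask above was a debug dump of the same window grouping that clip_image_batch_encode still performs. To make the idx/inv_idx permutation concrete, here is a trimmed standalone version of the grouping loop with toy sizes (a 4x4 grid of merged tokens and 2x2 windows; these numbers are for illustration only).

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    const int pw = 4, ph = 4;        // merged tokens per row / column (toy values)
    const int grid_window = 2;       // merged tokens per window side (toy value)

    std::vector<int> idx(ph * pw), inv_idx(ph * pw);
    int dst = 0;
    for (int y = 0; y < ph; y += grid_window) {
        for (int x = 0; x < pw; x += grid_window) {
            const int win_h = std::min(grid_window, ph - y);
            const int win_w = std::min(grid_window, pw - x);
            // group all tokens of one window into a contiguous range of slots
            for (int dy = 0; dy < win_h; dy++) {
                for (int dx = 0; dx < win_w; dx++) {
                    const int src = (y + dy) * pw + (x + dx);
                    idx[src]     = dst;   // row-major index -> window-grouped slot
                    inv_idx[dst] = src;   // inverse permutation
                    dst++;
                }
            }
        }
    }
    for (int i = 0; i < ph * pw; i++) {
        printf("token %2d (row-major) -> slot %2d (window order)\n", i, idx[i]);
    }
    return 0;
}

With these toy sizes, tokens 2 and 3 of the first image row land in slots 4 and 5, after the four tokens of the top-left window, which is the contiguous-per-window layout the block-diagonal attention mask relies on.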