Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-11 17:44:38 +00:00)
mtmd : support MiniCPM-V 4.0 (#14983)
* support minicpm-v 4
* add md
* support MiniCPM-o 4.0
* add default location
* temp rm MiniCPM-o 4.0
* fix code
* fix "minicpmv_projector" default path
Parent: 36e5fe7bcd
Commit: 952a47f455
8 changed files with 145 additions and 15 deletions
docs/multimodal/minicpmo2.6.md
@@ -29,8 +29,8 @@ cmake --build build --config Release
 Convert PyTorch model to gguf files (You can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-o-2_6-gguf) by us)
 
 ```bash
-python ./tools/mtmd/minicpmv-surgery.py -m ../MiniCPM-o-2_6
-python ./tools/mtmd/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-o-2_6 --minicpmv-projector ../MiniCPM-o-2_6/minicpmv.projector --output-dir ../MiniCPM-o-2_6/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 4
+python ./tools/mtmd/legacy-models/minicpmv-surgery.py -m ../MiniCPM-o-2_6
+python ./tools/mtmd/legacy-models/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-o-2_6 --minicpmv-projector ../MiniCPM-o-2_6/minicpmv.projector --output-dir ../MiniCPM-o-2_6/ --minicpmv_version 4
 python ./convert_hf_to_gguf.py ../MiniCPM-o-2_6/model
 
 # quantize int4 version
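The two commands above change for two reasons: the conversion scripts moved under `tools/mtmd/legacy-models/`, and the explicit `--image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5` flags become unnecessary because the converter's defaults change to those exact values (see the script hunk further down). A minimal sketch of the resulting fallback; `resolve_normalization` is an illustrative helper, not a function in the script:

```python
# Sketch, assuming the new defaults from the converter hunk further down:
# omitting --image-mean/--image-std now yields the same [0.5, 0.5, 0.5]
# values these docs used to pass explicitly.
default_image_mean = [0.5, 0.5, 0.5]
default_image_std  = [0.5, 0.5, 0.5]

def resolve_normalization(cli_mean=None, cli_std=None):
    """Prefer CLI-supplied values, else fall back to the script defaults."""
    mean = cli_mean if cli_mean is not None else default_image_mean
    std  = cli_std  if cli_std  is not None else default_image_std
    return mean, std
```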
docs/multimodal/minicpmo4.0.md (new file, 47 lines)
@@ -0,0 +1,47 @@
+## MiniCPM-o 4
+
+### Prepare models and code
+
+Download [MiniCPM-o-4](https://huggingface.co/openbmb/MiniCPM-o-4) PyTorch model from huggingface to "MiniCPM-o-4" folder.
+
+
+### Build llama.cpp
+Readme modification time: 20250206
+
+If there are differences in usage, please refer to the official build [documentation](https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md)
+
+Clone llama.cpp:
+```bash
+git clone https://github.com/ggerganov/llama.cpp
+cd llama.cpp
+```
+
+Build llama.cpp using `CMake`:
+```bash
+cmake -B build
+cmake --build build --config Release
+```
+
+
+### Usage of MiniCPM-o 4
+
+Convert PyTorch model to gguf files (You can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-o-4-gguf) by us)
+
+```bash
+python ./tools/mtmd/legacy-models/minicpmv-surgery.py -m ../MiniCPM-o-4
+python ./tools/mtmd/legacy-models/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-o-4 --minicpmv-projector ../MiniCPM-o-4/minicpmv.projector --output-dir ../MiniCPM-o-4/ --minicpmv_version 6
+python ./convert_hf_to_gguf.py ../MiniCPM-o-4/model
+
+# quantize int4 version
+./build/bin/llama-quantize ../MiniCPM-o-4/model/ggml-model-f16.gguf ../MiniCPM-o-4/model/ggml-model-Q4_K_M.gguf Q4_K_M
+```
+
+
+Inference on Linux or Mac
+```bash
+# run in single-turn mode
+./build/bin/llama-mtmd-cli -m ../MiniCPM-o-4/model/ggml-model-f16.gguf --mmproj ../MiniCPM-o-4/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?"
+
+# run in conversation mode
+./build/bin/llama-mtmd-cli -m ../MiniCPM-o-4/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-o-4/mmproj-model-f16.gguf
+```
docs/multimodal/minicpmv2.5.md
@@ -28,8 +28,8 @@ cmake --build build --config Release
 Convert PyTorch model to gguf files (You can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5-gguf) by us)
 
 ```bash
-python ./tools/mtmd/minicpmv-surgery.py -m ../MiniCPM-Llama3-V-2_5
-python ./tools/mtmd/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-Llama3-V-2_5 --minicpmv-projector ../MiniCPM-Llama3-V-2_5/minicpmv.projector --output-dir ../MiniCPM-Llama3-V-2_5/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 2
+python ./tools/mtmd/legacy-models/minicpmv-surgery.py -m ../MiniCPM-Llama3-V-2_5
+python ./tools/mtmd/legacy-models/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-Llama3-V-2_5 --minicpmv-projector ../MiniCPM-Llama3-V-2_5/minicpmv.projector --output-dir ../MiniCPM-Llama3-V-2_5/ --minicpmv_version 2
 python ./convert_hf_to_gguf.py ../MiniCPM-Llama3-V-2_5/model
 
 # quantize int4 version
docs/multimodal/minicpmv2.6.md
@@ -28,8 +28,8 @@ cmake --build build --config Release
 Convert PyTorch model to gguf files (You can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-V-2_6-gguf) by us)
 
 ```bash
-python ./tools/mtmd/minicpmv-surgery.py -m ../MiniCPM-V-2_6
-python ./tools/mtmd/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-V-2_6 --minicpmv-projector ../MiniCPM-V-2_6/minicpmv.projector --output-dir ../MiniCPM-V-2_6/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 3
+python ./tools/mtmd/legacy-models/minicpmv-surgery.py -m ../MiniCPM-V-2_6
+python ./tools/mtmd/legacy-models/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-V-2_6 --minicpmv-projector ../MiniCPM-V-2_6/minicpmv.projector --output-dir ../MiniCPM-V-2_6/ --minicpmv_version 3
 python ./convert_hf_to_gguf.py ../MiniCPM-V-2_6/model
 
 # quantize int4 version
docs/multimodal/minicpmv4.0.md (new file, 47 lines)
@@ -0,0 +1,47 @@
+## MiniCPM-V 4
+
+### Prepare models and code
+
+Download [MiniCPM-V-4](https://huggingface.co/openbmb/MiniCPM-V-4) PyTorch model from huggingface to "MiniCPM-V-4" folder.
+
+
+### Build llama.cpp
+Readme modification time: 20250206
+
+If there are differences in usage, please refer to the official build [documentation](https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md)
+
+Clone llama.cpp:
+```bash
+git clone https://github.com/ggerganov/llama.cpp
+cd llama.cpp
+```
+
+Build llama.cpp using `CMake`:
+```bash
+cmake -B build
+cmake --build build --config Release
+```
+
+
+### Usage of MiniCPM-V 4
+
+Convert PyTorch model to gguf files (You can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-V-4-gguf) by us)
+
+```bash
+python ./tools/mtmd/legacy-models/minicpmv-surgery.py -m ../MiniCPM-V-4
+python ./tools/mtmd/legacy-models/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-V-4 --minicpmv-projector ../MiniCPM-V-4/minicpmv.projector --output-dir ../MiniCPM-V-4/ --minicpmv_version 5
+python ./convert_hf_to_gguf.py ../MiniCPM-V-4/model
+
+# quantize int4 version
+./build/bin/llama-quantize ../MiniCPM-V-4/model/ggml-model-f16.gguf ../MiniCPM-V-4/model/ggml-model-Q4_K_M.gguf Q4_K_M
+```
+
+
+Inference on Linux or Mac
+```bash
+# run in single-turn mode
+./build/bin/llama-mtmd-cli -m ../MiniCPM-V-4/model/ggml-model-f16.gguf --mmproj ../MiniCPM-V-4/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?"
+
+# run in conversation mode
+./build/bin/llama-mtmd-cli -m ../MiniCPM-V-4/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-V-4/mmproj-model-f16.gguf
+```
tools/mtmd/clip.cpp
@@ -868,10 +868,16 @@ struct clip_graph {
     int n_head = n_embd/d_head;
     int num_query = 96;
     if (ctx->model.hparams.minicpmv_version == 2) {
+        // MiniCPM-V 2.5
         num_query = 96;
     } else if (ctx->model.hparams.minicpmv_version == 3) {
+        // MiniCPM-V 2.6
         num_query = 64;
     } else if (ctx->model.hparams.minicpmv_version == 4) {
+        // MiniCPM-o 2.6
+        num_query = 64;
+    } else if (ctx->model.hparams.minicpmv_version == 5) {
+        // MiniCPM-V 4.0
         num_query = 64;
     }
 
@@ -3551,10 +3557,16 @@ int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
         case PROJECTOR_TYPE_MINICPMV:
             {
                 if (params.minicpmv_version == 2) {
+                    // MiniCPM-V 2.5
                     n_patches_sq = 96;
                 } else if (params.minicpmv_version == 3) {
+                    // MiniCPM-V 2.6
                     n_patches_sq = 64;
                 } else if (params.minicpmv_version == 4) {
+                    // MiniCPM-o 2.6
+                    n_patches_sq = 64;
+                } else if (params.minicpmv_version == 5) {
+                    // MiniCPM-V 4.0
                     n_patches_sq = 64;
                 } else {
                     GGML_ABORT("Unknown minicpmv version");
@@ -4103,11 +4115,17 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
             return ctx->model.mm_3_b->ne[0];
         case PROJECTOR_TYPE_MINICPMV:
             if (hparams.minicpmv_version == 2) {
+                // MiniCPM-V 2.5
                 return 4096;
             } else if (hparams.minicpmv_version == 3) {
+                // MiniCPM-V 2.6
                 return 3584;
             } else if (hparams.minicpmv_version == 4) {
+                // MiniCPM-o 2.6
                 return 3584;
+            } else if (hparams.minicpmv_version == 5) {
+                // MiniCPM-V 4.0
+                return 2560;
             }
             GGML_ABORT("Unknown minicpmv version");
         case PROJECTOR_TYPE_GLM_EDGE:
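Taken together, the three `clip.cpp` hunks keep one table in sync: each `minicpmv_version` maps to a fixed query-token count (`num_query`, equal to `n_patches_sq`) and a projector output width. A Python sketch of that mapping as a reading aid, not code from the patch:

```python
# version -> (model name, query tokens, clip_n_mmproj_embd width)
MINICPMV_PARAMS = {
    2: ("MiniCPM-V 2.5", 96, 4096),
    3: ("MiniCPM-V 2.6", 64, 3584),
    4: ("MiniCPM-o 2.6", 64, 3584),
    5: ("MiniCPM-V 4.0", 64, 2560),  # added by this commit
}

def n_output_tokens(version: int) -> int:
    # mirrors the PROJECTOR_TYPE_MINICPMV branch of clip_n_output_tokens
    return MINICPMV_PARAMS[version][1]
```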
tools/mtmd/legacy-models/minicpmv-convert-image-encoder-to-gguf.py
@@ -497,11 +497,11 @@ ap.add_argument("--projector-type", help="Type of projector. Possible values: ml
 ap.add_argument("-o", "--output-dir", help="Directory to save GGUF files. Default is the original model directory", default=None)
 # Example --image_mean 0.48145466 0.4578275 0.40821073 --image_std 0.26862954 0.26130258 0.27577711
 # Example --image_mean 0.5 0.5 0.5 --image_std 0.5 0.5 0.5
-default_image_mean = [0.48145466, 0.4578275, 0.40821073]
-default_image_std = [0.26862954, 0.26130258, 0.27577711]
+default_image_mean = [0.5, 0.5, 0.5]
+default_image_std = [0.5, 0.5, 0.5]
 ap.add_argument('--image-mean', type=float, nargs='+', help='Mean of the images for normalization (overrides processor) ', default=None)
 ap.add_argument('--image-std', type=float, nargs='+', help='Standard deviation of the images for normalization (overrides processor)', default=None)
-ap.add_argument('--minicpmv_version', type=int, help='minicpmv_version: MiniCPM-V-2 use 1; MiniCPM-V-2.5 use 2; MiniCPM-V-2.6 use 3; MiniCPM-o-2.6 use 4', default=2)
+ap.add_argument('--minicpmv_version', type=int, help='minicpmv_version: MiniCPM-V-2 use 1; MiniCPM-V-2.5 use 2; MiniCPM-V-2.6 use 3; MiniCPM-o-2.6 use 4; MiniCPM-V 4.0 use 5; MiniCPM-o-4.0 use 6', default=2)
 
 # with proper
 args = ap.parse_args()
@@ -517,6 +517,17 @@ if args.use_f32:
 # output in the same directory as the model if output_dir is None
 dir_model = args.model_dir
 
+# If minicpmv_projector is not specified but the default path exists, use the default path
+if args.minicpmv_projector is None:
+    default_projector_path = os.path.join(dir_model, "minicpmv.projector")
+    if os.path.isfile(default_projector_path):
+        args.minicpmv_projector = default_projector_path
+        print(f"Found default projector file: {default_projector_path}")
+
+# If output_dir is not specified, use model_dir as the default value
+if args.output_dir is None:
+    args.output_dir = dir_model
+
 if args.clip_model_is_vision or not os.path.exists(dir_model + "/vocab.json") or args.clip_model_is_openclip:
     vocab = None
     tokens = None
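With these defaults in place, a model directory that `minicpmv-surgery.py` has already processed (so it contains `minicpmv.projector`) can be converted with just `-m`. A sketch of the resolution order; `resolve_defaults` is an illustrative wrapper, not a function in the script:

```python
import os

def resolve_defaults(model_dir, projector=None, output_dir=None):
    # Mirrors the added fallback: prefer explicit flags, else look for the
    # projector file that minicpmv-surgery.py writes into the model directory.
    candidate = os.path.join(model_dir, "minicpmv.projector")
    if projector is None and os.path.isfile(candidate):
        projector = candidate
    if output_dir is None:
        output_dir = model_dir
    return projector, output_dir
```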
@@ -546,18 +557,21 @@ if args.use_f32:
 minicpmv_version = args.minicpmv_version
 emb_dim = 4096
 block_count = 26
-if minicpmv_version == 1:
+if minicpmv_version == 1: # MiniCPM-V 2.0
     emb_dim = 2304
     block_count = 26
-elif minicpmv_version == 2:
+elif minicpmv_version == 2: # MiniCPM-V 2.5
     emb_dim = 4096
     block_count = 27
-elif minicpmv_version == 3:
+elif minicpmv_version == 3: # MiniCPM-V 2.6
     emb_dim = 3584
     block_count = 27
-elif minicpmv_version == 4:
+elif minicpmv_version == 4: # MiniCPM-o 2.6
     emb_dim = 3584
     block_count = 27
+elif minicpmv_version == 5: # MiniCPM-V 4.0
+    emb_dim = 2560
+    block_count = 27
 
 default_vision_config = {
     "hidden_size": 1152,
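The `emb_dim` chosen here has to agree with what `clip_n_mmproj_embd` returns for the same version (see the `clip.cpp` hunk above), otherwise the generated projector GGUF will not line up with the language model at load time. A hypothetical consistency check, not part of the patch:

```python
# clip.cpp widths vs. converter (emb_dim, block_count) per minicpmv_version
CLIP_EMBD = {2: 4096, 3: 3584, 4: 3584, 5: 2560}
CONVERTER = {2: (4096, 27), 3: (3584, 27), 4: (3584, 27), 5: (2560, 27)}
for ver, (emb_dim, _block_count) in CONVERTER.items():
    assert CLIP_EMBD[ver] == emb_dim, f"version {ver}: width mismatch"
```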
@@ -577,6 +591,10 @@ if minicpmv_version == 3:
 elif minicpmv_version == 4:
     vision_config = SiglipVisionConfig(**default_vision_config)
     model = SiglipVisionTransformer(vision_config)
+elif minicpmv_version == 5:
+    default_vision_config["model_type"] = "siglip_vision_model"
+    vision_config = SiglipVisionConfig(**default_vision_config)
+    model = SiglipVisionTransformer(vision_config)
 
 processor = None
 # if model.attn_pool is not None:
@@ -603,7 +621,7 @@ elif args.vision_only:
 else:
     fname_middle = ""
 
-output_dir = args.output_dir if args.output_dir is not None else dir_model
+output_dir = args.output_dir
 os.makedirs(output_dir, exist_ok=True)
 output_prefix = os.path.basename(output_dir).replace("ggml_", "")
 fname_out = os.path.join(output_dir, f"{fname_middle}model-{ftype_str[ftype]}.gguf")
tools/mtmd/mtmd.cpp
@@ -207,7 +207,7 @@ struct mtmd_context {
         tok_row_end_trail = false; // no trailing end-of-row token
         ov_img_first = true;
 
-    } else if (minicpmv_version == 3 || minicpmv_version == 4) {
+    } else if (minicpmv_version == 3 || minicpmv_version == 4 || minicpmv_version == 5) {
         // minicpmv 2.6 format:
         // <image> (overview) </image><slice> (slice) </slice><slice> (slice) </slice>\n ...
         slice_tmpl = MTMD_SLICE_TMPL_MINICPMV_2_6;
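Version 5 (MiniCPM-V 4.0) reuses the MiniCPM-V 2.6 slice layout instead of introducing a new template. A sketch of the selection logic; the 2.5 fallback branch is an assumption from the surrounding context, not shown in this hunk:

```python
def slice_template(minicpmv_version: int) -> str:
    # 3, 4 and now 5 share the 2.6 layout:
    # <image> (overview) </image><slice> (slice) </slice> ... \n
    if minicpmv_version in (3, 4, 5):
        return "MTMD_SLICE_TMPL_MINICPMV_2_6"
    return "MTMD_SLICE_TMPL_MINICPMV_2_5"  # assumed: the earlier 2.5-style branch
```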