From 2fbc3b2ae544e69800d41c097e049732e65db508 Mon Sep 17 00:00:00 2001 From: JustCommitRandomness <51818859+JustCommitRandomness@users.noreply.github.com> Date: Fri, 6 Mar 2026 11:06:18 +0000 Subject: [PATCH] Adjust int types in format strings (#2009) * tweak format string types This may not be all of them, but it's the ones which warn on OpenBSD * complete the changes needed to fix the format string specifiers * avoid using inttypes, directly cast to size_t (u64 usually) instead --------- Co-authored-by: Concedo <39025047+LostRuins@users.noreply.github.com> --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 2 +- gpttype_adapter.cpp | 6 +++--- otherarch/embeddings_adapter.cpp | 6 +++--- otherarch/gpt2_v2.cpp | 6 +++--- otherarch/gptj_v2.cpp | 6 +++--- otherarch/gptj_v3.cpp | 4 ++-- otherarch/rwkv_v2.cpp | 6 +++--- otherarch/sdcpp/sdtype_adapter.cpp | 2 +- otherarch/tts_adapter.cpp | 26 ++++++++++++------------ otherarch/ttscpp/src/phonemizer.cpp | 2 +- otherarch/ttscpp/src/tts_model.cpp | 4 ++-- otherarch/whispercpp/whisper_adapter.cpp | 2 +- tools/mtmd/clip.cpp | 2 +- 13 files changed, 37 insertions(+), 37 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 1c032a581..90a7f3827 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -2535,7 +2535,7 @@ static vk_buffer ggml_vk_create_buffer(vk_device& device, size_t size, const std void *import_ptr = nullptr) { VK_LOG_DEBUG("ggml_vk_create_buffer(" << device->name << ", " << size << ", " << to_string(req_flags_list.begin()[0]) << ", " << to_string(req_flags_list.begin()[req_flags_list.size()-1]) << ")"); if (size > device->max_buffer_size) { - printf("\nWARNING: Requested buffer size (%zu) exceeds device max_buffer_size limit (%zu)!\n",size,device->max_buffer_size); + printf("\nWARNING: Requested buffer size (%zu) exceeds device max_buffer_size limit (%zu)!\n",size,(size_t)device->max_buffer_size); } vk_buffer buf = 
std::make_shared(); diff --git a/gpttype_adapter.cpp b/gpttype_adapter.cpp index 9fa2d5b01..efdb45f10 100644 --- a/gpttype_adapter.cpp +++ b/gpttype_adapter.cpp @@ -2384,7 +2384,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in devices_override = kcpp_parse_device_list(dev_override_str); if(devices_override.size()>0) { - printf("\nOverriding with %d devices...\n",devices_override.size()-1); + printf("\nOverriding with %zu devices...\n",devices_override.size()-1); model_params.devices = devices_override.data(); } } @@ -2562,7 +2562,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in model_params.tensor_buft_overrides = tenos.data(); model_params.tensor_split = tensor_split_temp; model_params.n_gpu_layers = -1; //must be this value to be considered default - printf("Autofit Reserve Space: %d MB\n",taxmb); + printf("Autofit Reserve Space: %zu MB\n",taxmb); //disable log spam bool dospam = (debugmode==1 && !is_quiet); ggml_log_callback currlogger; @@ -5441,4 +5441,4 @@ int get_oldest_slot(int excludeSlotId) } } return slotid; -} \ No newline at end of file +} diff --git a/otherarch/embeddings_adapter.cpp b/otherarch/embeddings_adapter.cpp index e48d57113..f04703d0a 100644 --- a/otherarch/embeddings_adapter.cpp +++ b/otherarch/embeddings_adapter.cpp @@ -123,7 +123,7 @@ bool embeddingstype_load_model(const embeddings_load_model_inputs inputs) if(devices_override.size()>0) { - printf("\nOverriding with %d devices...\n",devices_override.size()-1); + printf("\nOverriding with %zu devices...\n",devices_override.size()-1); model_params.devices = devices_override.data(); } @@ -215,7 +215,7 @@ embeddings_generation_outputs embeddingstype_generate(const embeddings_generatio } if(embeddings_debug) { - printf("\n%s: Input too long, truncated from %d to last %d tokens.\n", __func__,oldsize,inp.size()); + printf("\n%s: Input too long, truncated from %d to last %zu tokens.\n", __func__,oldsize,inp.size()); } } else 
{ printf("\n%s: number of tokens in an input (%lld) exceeds embedding size limit for this model (%lld), lower token amount!\n", @@ -232,7 +232,7 @@ embeddings_generation_outputs embeddingstype_generate(const embeddings_generatio { print_tok_vec(inp); } - printf("\nGenerating Embeddings for %d tokens...",inp.size()); + printf("\nGenerating Embeddings for %zu tokens...",inp.size()); // initialize batch const int n_prompts = 1; diff --git a/otherarch/gpt2_v2.cpp b/otherarch/gpt2_v2.cpp index 33ca85e11..b5b7f2063 100644 --- a/otherarch/gpt2_v2.cpp +++ b/otherarch/gpt2_v2.cpp @@ -287,8 +287,8 @@ ModelLoadResult gpt2_v2_model_load(const std::string & fname, gpt2_v2_model & mo } if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) { - fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%ld, %ld], expected [%d, %d]\n", - __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]); + fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%zu, %zu], expected [%zu, %zu]\n", + __func__, name.data(), (size_t)tensor->ne[0], (size_t)tensor->ne[1], (size_t)ne[0], (size_t)ne[1]); return ModelLoadResult::FAIL; } @@ -650,4 +650,4 @@ bool gpt2_v2_eval( ggml_v2_free(ctx0); return true; -} \ No newline at end of file +} diff --git a/otherarch/gptj_v2.cpp b/otherarch/gptj_v2.cpp index 97e885016..dc3a4128f 100644 --- a/otherarch/gptj_v2.cpp +++ b/otherarch/gptj_v2.cpp @@ -294,8 +294,8 @@ ModelLoadResult gptj_v2_model_load(const std::string & fname, gptj_v2_model & mo } else { - fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%ld, %ld], expected [%d, %d]\n", - __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]); + fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%zu, %zu], expected [%zu, %zu]\n", + __func__, name.data(), (size_t)tensor->ne[0], (size_t)tensor->ne[1], (size_t)ne[0], (size_t)ne[1]); return ModelLoadResult::FAIL; } @@ -572,4 +572,4 @@ bool gptj_v2_eval( ggml_v2_free(ctx0); return 
true; -} \ No newline at end of file +} diff --git a/otherarch/gptj_v3.cpp b/otherarch/gptj_v3.cpp index 653a4055f..a0cbf4d80 100644 --- a/otherarch/gptj_v3.cpp +++ b/otherarch/gptj_v3.cpp @@ -304,8 +304,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g } else { - fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%ld, %ld], expected [%d, %d]\n", - __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]); + fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%zu, %zu], expected [%zu, %zu]\n", + __func__, name.data(), (size_t)tensor->ne[0], (size_t)tensor->ne[1], (size_t)ne[0], (size_t)ne[1]); return ModelLoadResult::FAIL; } diff --git a/otherarch/rwkv_v2.cpp b/otherarch/rwkv_v2.cpp index ffc159d62..c2712d5c7 100644 --- a/otherarch/rwkv_v2.cpp +++ b/otherarch/rwkv_v2.cpp @@ -367,8 +367,8 @@ struct rwkv_v2_context * rwkv_v2_init_from_file(const char * file_path, uint32_t // Verify order of dimensions struct ggml_v2_tensor * emb = model->emb; RWKV_V2_ASSERT_NULL(emb->n_dims == 2, "Unexpected dimension count of embedding matrix %d", emb->n_dims); - RWKV_V2_ASSERT_NULL(emb->ne[0] == model->n_embed, "Unexpected dimension of embedding matrix %ld", emb->ne[0]); - RWKV_V2_ASSERT_NULL(emb->ne[1] == model->n_vocab, "Unexpected dimension of embedding matrix %ld", emb->ne[1]); + RWKV_V2_ASSERT_NULL(emb->ne[0] == model->n_embed, "Unexpected dimension of embedding matrix %zu", (size_t)emb->ne[0]); + RWKV_V2_ASSERT_NULL(emb->ne[1] == model->n_vocab, "Unexpected dimension of embedding matrix %zu", (size_t)emb->ne[1]); int32_t n_embed = model->n_embed; int32_t n_layer = model->n_layer; @@ -862,4 +862,4 @@ const char * rwkv_v2_get_system_info_string(void) { s += "VSX = " + std::to_string(ggml_v2_cpu_has_vsx()) + " | "; return s.c_str(); -} \ No newline at end of file +} diff --git a/otherarch/sdcpp/sdtype_adapter.cpp b/otherarch/sdcpp/sdtype_adapter.cpp index 9fdb2747c..3c827f951 100644 --- 
a/otherarch/sdcpp/sdtype_adapter.cpp +++ b/otherarch/sdcpp/sdtype_adapter.cpp @@ -428,7 +428,7 @@ bool sdtype_load_model(const sd_load_model_inputs inputs) { if(sd_params->lora_specs.size()>0 && inputs.lora_multiplier>0) { - printf("\nApply %d LoRAs...\n",sd_params->lora_specs.size()); + printf("\nApply %zu LoRAs...\n",sd_params->lora_specs.size()); sd_params->lora_count = sd_params->lora_specs.size(); sd_ctx->sd->apply_loras(sd_params->lora_specs.data(), sd_params->lora_count); } diff --git a/otherarch/tts_adapter.cpp b/otherarch/tts_adapter.cpp index feab32a25..c9e01f960 100644 --- a/otherarch/tts_adapter.cpp +++ b/otherarch/tts_adapter.cpp @@ -607,7 +607,7 @@ bool ttstype_load_model(const tts_load_model_inputs inputs) if(devices_override.size()>0) { - printf("\nOverriding with %d devices...\n",devices_override.size()-1); + printf("\nOverriding with %zu devices...\n",devices_override.size()-1); tts_model_params.devices = devices_override.data(); } @@ -846,7 +846,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu //able to proceed, do nothing if(!tts_is_quiet && ttsdebugmode==1) { - printf("\nReuse speaker ID=%d (%d tokens)...", last_speaker_seed, last_speaker_codes.size()); + printf("\nReuse speaker ID=%d (%zu tokens)...", last_speaker_seed, last_speaker_codes.size()); } } else if (custom_speaker_data!="" && custom_speaker_text!="") { //custom speaker json std::string speaker = format_audiotokens(custom_speaker_data,ttsver); @@ -854,7 +854,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu last_speaker_seed = speaker_seed; if(!tts_is_quiet && ttsdebugmode==1) { - printf("\nCustom Speaker JSON (%d tokens)...", last_speaker_seed, last_speaker_codes.size()); + printf("\nCustom Speaker JSON seed=%d (%zu tokens)...", last_speaker_seed, last_speaker_codes.size()); } } else if (speaker_seed>=1 && speaker_seed<=5){ //special seeds std::string speaker = ""; @@ -880,7 +880,7 @@ static 
tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu last_speaker_seed = speaker_seed; if(!tts_is_quiet && ttsdebugmode==1) { - printf("\nSpecial ID=%d (%d tokens)...", last_speaker_seed, last_speaker_codes.size()); + printf("\nSpecial ID=%d (%zu tokens)...", last_speaker_seed, last_speaker_codes.size()); } } else { //generate the voice texture of our new speaker @@ -888,7 +888,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu guide_tokens = prepare_guide_tokens(ttcvocab,sampletext,ttsver); if(!tts_is_quiet && ttsdebugmode==1) { - printf("\nGuide Tokens (%d tokens):\n", guide_tokens.size()); + printf("\nGuide Tokens (%zu tokens):\n", guide_tokens.size()); const std::string inp_txt = common_detokenize(ttc_ctx, guide_tokens, true); printf("%s,", inp_txt.c_str()); printf("\n"); @@ -897,7 +897,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu prompt_add(prompt_inp, ttcvocab, "<|text_end|>\n<|audio_start|>\n", false, true); if(!tts_is_quiet && ttsdebugmode==1) { - printf("\nPrepare new speaker (%d input tokens)...\n", prompt_inp.size()); + printf("\nPrepare new speaker (%zu input tokens)...\n", prompt_inp.size()); print_tok_vec(prompt_inp); } kcpp_embd_batch tts_batch = kcpp_embd_batch(prompt_inp, 0, false, false); @@ -969,7 +969,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu last_speaker_seed = speaker_seed; if(!tts_is_quiet && ttsdebugmode==1) { - printf("\nNew speaker ID=%d created (%d tokens)...", last_speaker_seed, last_speaker_codes.size()); + printf("\nNew speaker ID=%d created (%zu tokens)...", last_speaker_seed, last_speaker_codes.size()); const std::string inp_txt = common_detokenize(ttc_ctx, last_speaker_codes, true); printf("\n%s\n", inp_txt.c_str()); } @@ -985,7 +985,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu guide_tokens = prepare_guide_tokens(ttcvocab,prompt_clean,ttsver); 
if(!tts_is_quiet && ttsdebugmode==1) { - printf("\nGuide Tokens (%d tokens):\n", guide_tokens.size()); + printf("\nGuide Tokens (%zu tokens):\n", guide_tokens.size()); const std::string inp_txt = common_detokenize(ttc_ctx, guide_tokens, true); printf("%s", inp_txt.c_str()); printf("\n"); @@ -998,7 +998,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu if(!tts_is_quiet) { - printf("\nTTS Processing (%d input tokens)...\n", prompt_inp.size()); + printf("\nTTS Processing (%zu input tokens)...\n", prompt_inp.size()); } prompt_add(prompt_inp, ttcvocab, "<|text_end|>\n<|audio_start|>\n", false, true); @@ -1011,7 +1011,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu if(!tts_is_quiet && ttsdebugmode==1) { - printf("\nDUMP TTS PROMPT (%d tokens):\n", prompt_inp.size()); + printf("\nDUMP TTS PROMPT (%zu tokens):\n", prompt_inp.size()); print_tok_vec(prompt_inp); const std::string inp_txt = common_detokenize(ttc_ctx, prompt_inp, true); printf("\n%s\n", inp_txt.c_str()); @@ -1084,7 +1084,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu if(!tts_is_quiet && ttsdebugmode==1) { const std::string inp_txt = common_detokenize(ttc_ctx, codes, true); - printf("\nGenerated %d Codes: '%s'\n",codes.size(), inp_txt.c_str()); + printf("\nGenerated %zu Codes: '%s'\n",codes.size(), inp_txt.c_str()); } // remove all non-audio tokens (i.e. 
< 151672 || > 155772) @@ -1104,7 +1104,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu return output; } kcpp_embd_batch codebatch = kcpp_embd_batch(codes,0,false,true); - printf("\nRunning Vocoder (%d AudioTokens)", codes.size()); + printf("\nRunning Vocoder (%zu AudioTokens)", codes.size()); if (llama_encode(cts_ctx, codebatch.batch) != 0) { printf("\nError: TTS vocoder generation failed!\n"); @@ -1256,4 +1256,4 @@ tts_generation_outputs ttstype_generate(const tts_generation_inputs inputs) { return ttstype_generate_outetts(inputs); } -} \ No newline at end of file +} diff --git a/otherarch/ttscpp/src/phonemizer.cpp b/otherarch/ttscpp/src/phonemizer.cpp index 25dae55ac..8f38e5e0b 100644 --- a/otherarch/ttscpp/src/phonemizer.cpp +++ b/otherarch/ttscpp/src/phonemizer.cpp @@ -821,7 +821,7 @@ void populate_kokoro_ipa_map(std::string executable_path) } } myfile.close(); - printf("\nPopulated Kokoro IPA: %d entries\n", kokoro_ipa_map.size()); + printf("\nPopulated Kokoro IPA: %zu entries\n", kokoro_ipa_map.size()); } else { diff --git a/otherarch/ttscpp/src/tts_model.cpp b/otherarch/ttscpp/src/tts_model.cpp index d4e5f6087..801d5edb4 100644 --- a/otherarch/ttscpp/src/tts_model.cpp +++ b/otherarch/ttscpp/src/tts_model.cpp @@ -110,10 +110,10 @@ void tts_model::prep_buffers_and_context(bool cpu_only, float size_offset, uint3 }; if(dedicated_add_on_size>13000) { - printf("Clamp TTS addon memory %zu to 13000\n",dedicated_add_on_size); + printf("Clamp TTS addon memory %zu to 13000\n",(size_t)dedicated_add_on_size); dedicated_add_on_size = 13000; } - printf("TTS Memory Requested: %zu, with buffer %zu + %zu\n",ctx_size,tensor_meta.n_bytes,dedicated_add_on_size); + printf("TTS Memory Requested: %zu, with buffer %zu + %zu\n",ctx_size,tensor_meta.n_bytes,(size_t)dedicated_add_on_size); ctx = ggml_init(params); buf = ggml_backend_buft_alloc_buffer(buffer, tensor_meta.n_bytes + dedicated_add_on_size); } diff --git 
a/otherarch/whispercpp/whisper_adapter.cpp b/otherarch/whispercpp/whisper_adapter.cpp index b8f7237d4..ee26a8e5b 100644 --- a/otherarch/whispercpp/whisper_adapter.cpp +++ b/otherarch/whispercpp/whisper_adapter.cpp @@ -40,7 +40,7 @@ static bool read_audio(const std::string & b64data, std::vector& pcmf32) if(whisperdebugmode==1 && !whisper_is_quiet) { - printf("\nwav_data_size: %d",pcmf32.size()); + printf("\nwav_data_size: %zu",pcmf32.size()); } return true; diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp index 4b19c3bcb..d7630aaf9 100644 --- a/tools/mtmd/clip.cpp +++ b/tools/mtmd/clip.cpp @@ -4320,7 +4320,7 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i const int64_t blck_size = ggml_blck_size(type); if(d==0 && cur->ne[d] % blck_size != 0) { - printf("\nSkipping %s because %" PRId64 " is not divisible by %ld\n",name.c_str(),cur->ne[d],blck_size); + printf("\nSkipping %s because %zu is not divisible by %zu\n",name.c_str(),(size_t)cur->ne[d],(size_t)blck_size); quantize = false; break; }