Adjust int types in format strings (#2009)

* tweak format string types
This may not be all of them, but these are the ones that warn on OpenBSD

* complete the changes needed to fix the format string specifiers

* avoid using inttypes; cast directly to size_t (usually u64) instead, as sketched below
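
The same fix is applied at every call site: rather than including inttypes.h and using the PRId64/PRIu64 macros, the printed value is cast to size_t and the specifier becomes %zu, the standard C99/C++11 specifier for size_t. A minimal standalone sketch of that pattern follows; the variable names are illustrative and not taken from the repo.

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
        int64_t dim   = 4096;   /* e.g. a tensor dimension stored as int64_t    */
        size_t  count = 12345;  /* e.g. the result of a container size() call   */

        /* "%ld" for int64_t or "%d" for size_t triggers format warnings
           (e.g. -Wformat) on platforms such as OpenBSD where the underlying
           integer widths differ from what the specifier expects. */

        /* Form used in this commit: cast to size_t and print with %zu. */
        printf("dim %zu, count %zu\n", (size_t)dim, count);
        return 0;
    }

This trades exactness for portability: the cast assumes the printed values are non-negative sizes and dimensions, which holds for the call sites touched here.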

---------

Co-authored-by: Concedo <39025047+LostRuins@users.noreply.github.com>
Authored by JustCommitRandomness on 2026-03-06 11:06:18 +00:00, committed by GitHub
parent e36d7b6464
commit 2fbc3b2ae5
13 changed files with 37 additions and 37 deletions

View file

@@ -2535,7 +2535,7 @@ static vk_buffer ggml_vk_create_buffer(vk_device& device, size_t size, const std
void *import_ptr = nullptr) {
VK_LOG_DEBUG("ggml_vk_create_buffer(" << device->name << ", " << size << ", " << to_string(req_flags_list.begin()[0]) << ", " << to_string(req_flags_list.begin()[req_flags_list.size()-1]) << ")");
if (size > device->max_buffer_size) {
printf("\nWARNING: Requested buffer size (%zu) exceeds device max_buffer_size limit (%zu)!\n",size,device->max_buffer_size);
printf("\nWARNING: Requested buffer size (%zu) exceeds device max_buffer_size limit (%zu)!\n",size,(size_t)device->max_buffer_size);
}
vk_buffer buf = std::make_shared<vk_buffer_struct>();

View file

@@ -2384,7 +2384,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
devices_override = kcpp_parse_device_list(dev_override_str);
if(devices_override.size()>0)
{
printf("\nOverriding with %d devices...\n",devices_override.size()-1);
printf("\nOverriding with %zu devices...\n",devices_override.size()-1);
model_params.devices = devices_override.data();
}
}
@@ -2562,7 +2562,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
model_params.tensor_buft_overrides = tenos.data();
model_params.tensor_split = tensor_split_temp;
model_params.n_gpu_layers = -1; //must be this value to be considered default
printf("Autofit Reserve Space: %d MB\n",taxmb);
printf("Autofit Reserve Space: %zu MB\n",taxmb);
//disable log spam
bool dospam = (debugmode==1 && !is_quiet);
ggml_log_callback currlogger;
@@ -5441,4 +5441,4 @@ int get_oldest_slot(int excludeSlotId)
}
}
return slotid;
}
}

View file

@@ -123,7 +123,7 @@ bool embeddingstype_load_model(const embeddings_load_model_inputs inputs)
if(devices_override.size()>0)
{
printf("\nOverriding with %d devices...\n",devices_override.size()-1);
printf("\nOverriding with %zu devices...\n",devices_override.size()-1);
model_params.devices = devices_override.data();
}
@@ -215,7 +215,7 @@ embeddings_generation_outputs embeddingstype_generate(const embeddings_generatio
}
if(embeddings_debug)
{
printf("\n%s: Input too long, truncated from %d to last %d tokens.\n", __func__,oldsize,inp.size());
printf("\n%s: Input too long, truncated from %d to last %zu tokens.\n", __func__,oldsize,inp.size());
}
} else {
printf("\n%s: number of tokens in an input (%lld) exceeds embedding size limit for this model (%lld), lower token amount!\n",
@@ -232,7 +232,7 @@ embeddings_generation_outputs embeddingstype_generate(const embeddings_generatio
{
print_tok_vec(inp);
}
printf("\nGenerating Embeddings for %d tokens...",inp.size());
printf("\nGenerating Embeddings for %zu tokens...",inp.size());
// initialize batch
const int n_prompts = 1;

View file

@@ -287,8 +287,8 @@ ModelLoadResult gpt2_v2_model_load(const std::string & fname, gpt2_v2_model & mo
}
if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%ld, %ld], expected [%d, %d]\n",
__func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%zu, %zu], expected [%zu, %zu]\n",
__func__, name.data(), (size_t)tensor->ne[0], (size_t)tensor->ne[1], (size_t)ne[0], (size_t)ne[1]);
return ModelLoadResult::FAIL;
}
@@ -650,4 +650,4 @@ bool gpt2_v2_eval(
ggml_v2_free(ctx0);
return true;
}
}

View file

@@ -294,8 +294,8 @@ ModelLoadResult gptj_v2_model_load(const std::string & fname, gptj_v2_model & mo
}
else
{
fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%ld, %ld], expected [%d, %d]\n",
__func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%zu, %zu], expected [%zu, %zu]\n",
__func__, name.data(), (size_t)tensor->ne[0], (size_t)tensor->ne[1], (size_t)ne[0], (size_t)ne[1]);
return ModelLoadResult::FAIL;
}
@@ -572,4 +572,4 @@ bool gptj_v2_eval(
ggml_v2_free(ctx0);
return true;
}
}

View file

@@ -304,8 +304,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
}
else
{
fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%ld, %ld], expected [%d, %d]\n",
__func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%zu, %zu], expected [%zu, %zu]\n",
__func__, name.data(), (size_t)tensor->ne[0], (size_t)tensor->ne[1], (size_t)ne[0], (size_t)ne[1]);
return ModelLoadResult::FAIL;
}

View file

@@ -367,8 +367,8 @@ struct rwkv_v2_context * rwkv_v2_init_from_file(const char * file_path, uint32_t
// Verify order of dimensions
struct ggml_v2_tensor * emb = model->emb;
RWKV_V2_ASSERT_NULL(emb->n_dims == 2, "Unexpected dimension count of embedding matrix %d", emb->n_dims);
RWKV_V2_ASSERT_NULL(emb->ne[0] == model->n_embed, "Unexpected dimension of embedding matrix %ld", emb->ne[0]);
RWKV_V2_ASSERT_NULL(emb->ne[1] == model->n_vocab, "Unexpected dimension of embedding matrix %ld", emb->ne[1]);
RWKV_V2_ASSERT_NULL(emb->ne[0] == model->n_embed, "Unexpected dimension of embedding matrix %zu", (size_t)emb->ne[0]);
RWKV_V2_ASSERT_NULL(emb->ne[1] == model->n_vocab, "Unexpected dimension of embedding matrix %zu", (size_t)emb->ne[1]);
int32_t n_embed = model->n_embed;
int32_t n_layer = model->n_layer;
@@ -862,4 +862,4 @@ const char * rwkv_v2_get_system_info_string(void) {
s += "VSX = " + std::to_string(ggml_v2_cpu_has_vsx()) + " | ";
return s.c_str();
}
}

View file

@@ -428,7 +428,7 @@ bool sdtype_load_model(const sd_load_model_inputs inputs) {
if(sd_params->lora_specs.size()>0 && inputs.lora_multiplier>0)
{
printf("\nApply %d LoRAs...\n",sd_params->lora_specs.size());
printf("\nApply %zu LoRAs...\n",sd_params->lora_specs.size());
sd_params->lora_count = sd_params->lora_specs.size();
sd_ctx->sd->apply_loras(sd_params->lora_specs.data(), sd_params->lora_count);
}

View file

@@ -607,7 +607,7 @@ bool ttstype_load_model(const tts_load_model_inputs inputs)
if(devices_override.size()>0)
{
printf("\nOverriding with %d devices...\n",devices_override.size()-1);
printf("\nOverriding with %zu devices...\n",devices_override.size()-1);
tts_model_params.devices = devices_override.data();
}
@@ -846,7 +846,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu
//able to proceed, do nothing
if(!tts_is_quiet && ttsdebugmode==1)
{
printf("\nReuse speaker ID=%d (%d tokens)...", last_speaker_seed, last_speaker_codes.size());
printf("\nReuse speaker ID=%d (%zu tokens)...", last_speaker_seed, last_speaker_codes.size());
}
} else if (custom_speaker_data!="" && custom_speaker_text!="") { //custom speaker json
std::string speaker = format_audiotokens(custom_speaker_data,ttsver);
@@ -854,7 +854,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu
last_speaker_seed = speaker_seed;
if(!tts_is_quiet && ttsdebugmode==1)
{
printf("\nCustom Speaker JSON (%d tokens)...", last_speaker_seed, last_speaker_codes.size());
printf("\nCustom Speaker JSON seed=%d (%zu tokens)...", last_speaker_seed, last_speaker_codes.size());
}
} else if (speaker_seed>=1 && speaker_seed<=5){ //special seeds
std::string speaker = "";
@@ -880,7 +880,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu
last_speaker_seed = speaker_seed;
if(!tts_is_quiet && ttsdebugmode==1)
{
printf("\nSpecial ID=%d (%d tokens)...", last_speaker_seed, last_speaker_codes.size());
printf("\nSpecial ID=%d (%zu tokens)...", last_speaker_seed, last_speaker_codes.size());
}
} else {
//generate the voice texture of our new speaker
@@ -888,7 +888,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu
guide_tokens = prepare_guide_tokens(ttcvocab,sampletext,ttsver);
if(!tts_is_quiet && ttsdebugmode==1)
{
printf("\nGuide Tokens (%d tokens):\n", guide_tokens.size());
printf("\nGuide Tokens (%zu tokens):\n", guide_tokens.size());
const std::string inp_txt = common_detokenize(ttc_ctx, guide_tokens, true);
printf("%s,", inp_txt.c_str());
printf("\n");
@@ -897,7 +897,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu
prompt_add(prompt_inp, ttcvocab, "<|text_end|>\n<|audio_start|>\n", false, true);
if(!tts_is_quiet && ttsdebugmode==1)
{
printf("\nPrepare new speaker (%d input tokens)...\n", prompt_inp.size());
printf("\nPrepare new speaker (%zu input tokens)...\n", prompt_inp.size());
print_tok_vec(prompt_inp);
}
kcpp_embd_batch tts_batch = kcpp_embd_batch(prompt_inp, 0, false, false);
@@ -969,7 +969,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu
last_speaker_seed = speaker_seed;
if(!tts_is_quiet && ttsdebugmode==1)
{
printf("\nNew speaker ID=%d created (%d tokens)...", last_speaker_seed, last_speaker_codes.size());
printf("\nNew speaker ID=%d created (%zu tokens)...", last_speaker_seed, last_speaker_codes.size());
const std::string inp_txt = common_detokenize(ttc_ctx, last_speaker_codes, true);
printf("\n%s\n", inp_txt.c_str());
}
@@ -985,7 +985,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu
guide_tokens = prepare_guide_tokens(ttcvocab,prompt_clean,ttsver);
if(!tts_is_quiet && ttsdebugmode==1)
{
printf("\nGuide Tokens (%d tokens):\n", guide_tokens.size());
printf("\nGuide Tokens (%zu tokens):\n", guide_tokens.size());
const std::string inp_txt = common_detokenize(ttc_ctx, guide_tokens, true);
printf("%s", inp_txt.c_str());
printf("\n");
@@ -998,7 +998,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu
if(!tts_is_quiet)
{
printf("\nTTS Processing (%d input tokens)...\n", prompt_inp.size());
printf("\nTTS Processing (%zu input tokens)...\n", prompt_inp.size());
}
prompt_add(prompt_inp, ttcvocab, "<|text_end|>\n<|audio_start|>\n", false, true);
@@ -1011,7 +1011,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu
if(!tts_is_quiet && ttsdebugmode==1)
{
printf("\nDUMP TTS PROMPT (%d tokens):\n", prompt_inp.size());
printf("\nDUMP TTS PROMPT (%zu tokens):\n", prompt_inp.size());
print_tok_vec(prompt_inp);
const std::string inp_txt = common_detokenize(ttc_ctx, prompt_inp, true);
printf("\n%s\n", inp_txt.c_str());
@@ -1084,7 +1084,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu
if(!tts_is_quiet && ttsdebugmode==1)
{
const std::string inp_txt = common_detokenize(ttc_ctx, codes, true);
printf("\nGenerated %d Codes: '%s'\n",codes.size(), inp_txt.c_str());
printf("\nGenerated %zu Codes: '%s'\n",codes.size(), inp_txt.c_str());
}
// remove all non-audio tokens (i.e. < 151672 || > 155772)
@@ -1104,7 +1104,7 @@ static tts_generation_outputs ttstype_generate_outetts(const tts_generation_inpu
return output;
}
kcpp_embd_batch codebatch = kcpp_embd_batch(codes,0,false,true);
printf("\nRunning Vocoder (%d AudioTokens)", codes.size());
printf("\nRunning Vocoder (%zu AudioTokens)", codes.size());
if (llama_encode(cts_ctx, codebatch.batch) != 0) {
printf("\nError: TTS vocoder generation failed!\n");
@@ -1256,4 +1256,4 @@ tts_generation_outputs ttstype_generate(const tts_generation_inputs inputs)
{
return ttstype_generate_outetts(inputs);
}
}
}

View file

@@ -821,7 +821,7 @@ void populate_kokoro_ipa_map(std::string executable_path)
}
}
myfile.close();
printf("\nPopulated Kokoro IPA: %d entries\n", kokoro_ipa_map.size());
printf("\nPopulated Kokoro IPA: %zu entries\n", kokoro_ipa_map.size());
}
else
{

View file

@@ -110,10 +110,10 @@ void tts_model::prep_buffers_and_context(bool cpu_only, float size_offset, uint3
};
if(dedicated_add_on_size>13000)
{
printf("Clamp TTS addon memory %zu to 13000\n",dedicated_add_on_size);
printf("Clamp TTS addon memory %zu to 13000\n",(size_t)dedicated_add_on_size);
dedicated_add_on_size = 13000;
}
printf("TTS Memory Requested: %zu, with buffer %zu + %zu\n",ctx_size,tensor_meta.n_bytes,dedicated_add_on_size);
printf("TTS Memory Requested: %zu, with buffer %zu + %zu\n",ctx_size,tensor_meta.n_bytes,(size_t)dedicated_add_on_size);
ctx = ggml_init(params);
buf = ggml_backend_buft_alloc_buffer(buffer, tensor_meta.n_bytes + dedicated_add_on_size);
}

View file

@@ -40,7 +40,7 @@ static bool read_audio(const std::string & b64data, std::vector<float>& pcmf32)
if(whisperdebugmode==1 && !whisper_is_quiet)
{
printf("\nwav_data_size: %d",pcmf32.size());
printf("\nwav_data_size: %zu",pcmf32.size());
}
return true;

View file

@@ -4320,7 +4320,7 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
const int64_t blck_size = ggml_blck_size(type);
if(d==0 && cur->ne[d] % blck_size != 0)
{
printf("\nSkipping %s because %" PRId64 " is not divisible by %ld\n",name.c_str(),cur->ne[d],blck_size);
printf("\nSkipping %s because %zu is not divisible by %zu\n",name.c_str(),(size_t)cur->ne[d],(size_t)blck_size);
quantize = false;
break;
}