Handle GGUF files that already contain the renamed diffusion-tensor prefix

This commit is contained in:
Concedo 2025-08-12 22:42:29 +08:00
parent 0d06e95548
commit 7b5cf7143f
3 changed files with 41 additions and 3 deletions

View file

@ -82,6 +82,39 @@ void print_tok_vec(std::vector<float> &embd)
std::cout << "]\n";
}
// Scans the tensor directory of a GGUF file and reports whether a tensor
// matching tensor_name is present.
//
// gguf_filename: path to the .gguf file to inspect.
// tensor_name:   name (or name fragment) to look for.
// exactmatch:    true  -> tensor name must equal tensor_name exactly;
//                false -> a substring match anywhere in the name counts.
// Returns false if the file cannot be opened/parsed as GGUF.
bool gguf_tensor_exists(const std::string & gguf_filename, std::string tensor_name, bool exactmatch)
{
    // Metadata-only load: no_alloc avoids allocating tensor data buffers.
    struct gguf_init_params ggufparams;
    ggufparams.no_alloc = true;
    ggufparams.ctx = NULL;

    struct gguf_context * ctx = gguf_init_from_file(gguf_filename.c_str(), ggufparams);
    if (!ctx)
    {
        return false; // unreadable or not a valid GGUF file
    }

    bool found = false;
    const int n_tensors = gguf_get_n_tensors(ctx);
    for (int idx = 0; idx < n_tensors && !found; ++idx)
    {
        const std::string curr_name = gguf_get_tensor_name(ctx, idx);
        found = exactmatch
            ? (curr_name == tensor_name)
            : (curr_name.find(tensor_name) != std::string::npos);
    }

    gguf_free(ctx);
    return found;
}
//return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt)
FileFormat check_file_format(const std::string & fname, FileFormatExtraMeta * fileformatmeta)
{

View file

@ -131,6 +131,7 @@ FileFormat check_file_format(const std::string & fname, FileFormatExtraMeta * fi
void ContextFastForward(std::vector<int> &current_context_tokens, std::vector<int> &embd_inp,
int &n_past, std::vector<int> &last_n_tokens, const int nctx, std::vector<int> &smartcontext,
const bool useSmartContext, const bool requireFullSubset);
bool gguf_tensor_exists(const std::string & filename, std::string tensor_name, bool exactmatch);
size_t gpttype_calc_new_state_kv();
size_t gpttype_calc_new_state_tokencount();

View file

@ -233,9 +233,13 @@ bool sdtype_load_model(const sd_load_model_inputs inputs) {
bool endswithgguf = (sd_params->model_path.rfind(".gguf") == sd_params->model_path.size() - 5);
if(sd_params->t5xxl_path!="" && endswithgguf)
{
printf("\nSwap to Diffusion Model Path:%s",sd_params->model_path.c_str()); //extra check - make sure there is no diffusion model prefix already inside!
sd_params->diffusion_model_path = sd_params->model_path; if(!gguf_tensor_exists(sd_params->model_path,"model.diffusion_model.",false))
sd_params->model_path = ""; {
printf("\nSwap to Diffusion Model Path:%s",sd_params->model_path.c_str());
sd_params->diffusion_model_path = sd_params->model_path;
sd_params->model_path = "";
}
}
sddebugmode = inputs.debugmode;