Mirror of https://github.com/LostRuins/koboldcpp.git (synced 2025-09-10 17:14:36 +00:00)
handle gguf already containing renamed diffusion tensors prefix
This commit is contained in:
parent 0d06e95548
commit 7b5cf7143f

3 changed files with 41 additions and 3 deletions
@@ -82,6 +82,39 @@ void print_tok_vec(std::vector<float> &embd)
    std::cout << "]\n";
}

bool gguf_tensor_exists(const std::string & gguf_filename, std::string tensor_name, bool exactmatch)
{
    struct gguf_init_params ggufparams;
    ggufparams.no_alloc = true;
    ggufparams.ctx = NULL;
    struct gguf_context * ctx = gguf_init_from_file(gguf_filename.c_str(), ggufparams);
    if (!ctx) return false;

    bool found = false;

    int n_tensors = gguf_get_n_tensors(ctx);
    for (int i = 0; i < n_tensors; i++) {
        std::string curr_name = gguf_get_tensor_name(ctx, i);
        if(exactmatch)
        {
            if (curr_name == tensor_name) {
                found = true;
                break;
            }
        }
        else
        {
            if (curr_name.find(tensor_name) != std::string::npos) {
                found = true;
                break;
            }
        }
    }

    gguf_free(ctx);
    return found;
}

//return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt)
FileFormat check_file_format(const std::string & fname, FileFormatExtraMeta * fileformatmeta)
{
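Side note (not part of the commit): a minimal standalone sketch of how the new helper could be called to probe a GGUF file for the renamed diffusion prefix. The declaration is copied from the header change below; the main() wrapper and its command-line handling are illustrative assumptions only.

// Illustrative usage sketch; assumes the helper above is compiled and linked in.
#include <cstdio>
#include <string>

bool gguf_tensor_exists(const std::string & filename, std::string tensor_name, bool exactmatch);

int main(int argc, char ** argv) {
    if (argc < 2) {
        printf("usage: %s model.gguf\n", argv[0]);
        return 1;
    }
    // exactmatch=false performs a substring match, so any tensor whose name
    // contains "model.diffusion_model." counts as a hit.
    bool has_prefix = gguf_tensor_exists(argv[1], "model.diffusion_model.", false);
    printf("renamed diffusion prefix present: %s\n", has_prefix ? "yes" : "no");
    return 0;
}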
@@ -131,6 +131,7 @@ FileFormat check_file_format(const std::string & fname, FileFormatExtraMeta * fileformatmeta)
void ContextFastForward(std::vector<int> &current_context_tokens, std::vector<int> &embd_inp,
                        int &n_past, std::vector<int> &last_n_tokens, const int nctx, std::vector<int> &smartcontext,
                        const bool useSmartContext, const bool requireFullSubset);
bool gguf_tensor_exists(const std::string & filename, std::string tensor_name, bool exactmatch);

size_t gpttype_calc_new_state_kv();
size_t gpttype_calc_new_state_tokencount();
@@ -232,11 +232,15 @@ bool sdtype_load_model(const sd_load_model_inputs inputs) {
    //if t5 is set, and model is a gguf, load it as a diffusion model path
    bool endswithgguf = (sd_params->model_path.rfind(".gguf") == sd_params->model_path.size() - 5);
    if(sd_params->t5xxl_path!="" && endswithgguf)
    {
        //extra check - make sure there is no diffusion model prefix already inside!
        if(!gguf_tensor_exists(sd_params->model_path,"model.diffusion_model.",false))
        {
            printf("\nSwap to Diffusion Model Path:%s",sd_params->model_path.c_str());
            sd_params->diffusion_model_path = sd_params->model_path;
            sd_params->model_path = "";
        }
    }

    sddebugmode = inputs.debugmode;
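For context, the decision implemented by the hunk above can be summarized as a small standalone function. SDPaths and maybe_swap_to_diffusion_path are hypothetical names used only for illustration; the real code operates on sd_params inside sdtype_load_model.

// Sketch of the path-swap rule: if a t5xxl text encoder is supplied and the
// main model is a .gguf whose tensors do NOT already carry the
// "model.diffusion_model." prefix, treat it as a bare diffusion model and
// move it to the diffusion model path.
#include <string>

struct SDPaths {
    std::string model_path;
    std::string diffusion_model_path;
    std::string t5xxl_path;
};

bool gguf_tensor_exists(const std::string & filename, std::string tensor_name, bool exactmatch);

void maybe_swap_to_diffusion_path(SDPaths & p) {
    bool endswithgguf = p.model_path.size() >= 5 &&
                        p.model_path.rfind(".gguf") == p.model_path.size() - 5;
    if (p.t5xxl_path != "" && endswithgguf) {
        // The extra check added by this commit: skip the swap when the file
        // already contains tensors with the renamed diffusion prefix.
        if (!gguf_tensor_exists(p.model_path, "model.diffusion_model.", false)) {
            p.diffusion_model_path = p.model_path;
            p.model_path = "";
        }
    }
}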