support for customizing LoRA multipliers through the sdapi (#1982)
* fix corner case in sd_oai_transform_params
  Also fixes a typo in the function name.
* support for customizing loaded LoRA multipliers
  The `sdloramult` flag now accepts a list of multipliers, one per LoRA. If all multipliers are non-zero, LoRAs load as before, with no extra VRAM usage or performance impact. If any LoRA has a multiplier of 0, we switch to `at_runtime` mode; those LoRAs become available for multiplier changes via the `lora` sdapi field and show up in the `sdapi/v1/loras` endpoint. All LoRAs are still preloaded on startup and cached to avoid file reloads.
  If the list of multipliers is shorter than the list of LoRAs, the multiplier list is extended with the first multiplier (1.0 by default), to keep it compatible with the previous behavior.
* support for `<lora:name:multiplier>` prompt syntax and metadata
* add a few tests for sanitize_lora_multipliers
Parent: eafb5ff4c5
Commit: 3f42ed1af7
5 changed files with 325 additions and 61 deletions
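A minimal usage sketch of the feature described above, assuming a running koboldcpp server with an image model and at least one LoRA loaded at multiplier 0 (the base URL is a placeholder, and the A1111-style `/sdapi/v1/txt2img` endpoint and its response shape are assumptions; the `lora` payload shape follows this commit's `prepare_lora_multipliers` and the `/sdapi/v1/loras` handler):

    import json
    import urllib.request

    BASE = "http://localhost:5001"  # placeholder; wherever koboldcpp listens

    # LoRAs loaded with a 0 multiplier are runtime-adjustable and listed here
    with urllib.request.urlopen(BASE + "/sdapi/v1/loras") as resp:
        loras = json.load(resp)  # e.g. [{"name": "face", "path": "face.safetensors"}]

    # enable one of them for a single generation via the new 'lora' field
    payload = {
        "prompt": "a portrait with soft lighting",
        "lora": [{"path": loras[0]["path"], "multiplier": 0.8}],
    }
    req = urllib.request.Request(
        BASE + "/sdapi/v1/txt2img",
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        images = json.load(resp).get("images", [])

The same multipliers can also be requested inline with the `<lora:name:multiplier>` prompt syntax, which is stripped from the prompt and mapped to LoRA paths before generation.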
expose.h (8 changes)

@@ -6,7 +6,6 @@ const int images_max = 8;
 const int audio_max = 4;
 const int logprobs_max = 10;
 const int overridekv_max = 16;
-const int lora_filenames_max = 4;
 
 // match kobold's sampler list and order
 enum samplers
@@ -189,8 +188,9 @@ struct sd_load_model_inputs
     const char * clip1_filename = nullptr;
     const char * clip2_filename = nullptr;
     const char * vae_filename = nullptr;
-    const char * lora_filenames[lora_filenames_max] = {};
-    const float lora_multiplier = 1.0f;
+    const int lora_len = 0;
+    const char ** lora_filenames = nullptr;
+    const float * lora_multipliers = nullptr;
     const int lora_apply_mode = 0;
     const char * photomaker_filename = nullptr;
     const char * upscaler_filename = nullptr;
@@ -227,6 +227,8 @@ struct sd_generation_inputs
     const bool circular_x = false;
     const bool circular_y = false;
     const bool upscale = false;
+    const int lora_len = 0;
+    const float * lora_multipliers = nullptr;
 };
 struct sd_generation_outputs
 {
koboldcpp.py (159 changes)

@@ -89,6 +89,7 @@ ttsmodelpath = "" #if empty, not initialized
 embeddingsmodelpath = "" #if empty, not initialized
 musicllmmodelpath = "" #if empty, not initialized
 musicdiffusionmodelpath = "" #if empty, not initialized
+imglorainfo = []
 maxctx = 8192
 maxhordectx = 0 #set to whatever maxctx is if 0
 maxhordelen = 1024
@@ -320,8 +321,9 @@ class sd_load_model_inputs(ctypes.Structure):
                 ("clip1_filename", ctypes.c_char_p),
                 ("clip2_filename", ctypes.c_char_p),
                 ("vae_filename", ctypes.c_char_p),
-                ("lora_filenames", ctypes.c_char_p * lora_filenames_max),
-                ("lora_multiplier", ctypes.c_float),
+                ("lora_len", ctypes.c_int),
+                ("lora_filenames", ctypes.POINTER(ctypes.c_char_p)),
+                ("lora_multipliers", ctypes.POINTER(ctypes.c_float)),
                 ("lora_apply_mode", ctypes.c_int),
                 ("photomaker_filename", ctypes.c_char_p),
                 ("upscaler_filename", ctypes.c_char_p),
@@ -356,7 +358,9 @@ class sd_generation_inputs(ctypes.Structure):
                 ("remove_limits", ctypes.c_bool),
                 ("circular_x", ctypes.c_bool),
                 ("circular_y", ctypes.c_bool),
-                ("upscale", ctypes.c_bool)]
+                ("upscale", ctypes.c_bool),
+                ("lora_len", ctypes.c_int),
+                ("lora_multipliers", ctypes.POINTER(ctypes.c_float))]
 
 class sd_generation_outputs(ctypes.Structure):
     _fields_ = [("status", ctypes.c_int),
@@ -1994,30 +1998,38 @@ def sd_load_model(model_filename,vae_filename,lora_filenames,t5xxl_filename,clip
     inputs.taesd = True if args.sdvaeauto else False
     inputs.tiled_vae_threshold = args.sdtiledvae
     inputs.vae_filename = vae_filename.encode("UTF-8")
-    for n in range(lora_filenames_max):
-        if n >= len(lora_filenames):
-            inputs.lora_filenames[n] = "".encode("UTF-8")
-        else:
-            inputs.lora_filenames[n] = lora_filenames[n].encode("UTF-8")
-
-    inputs.lora_multiplier = args.sdloramult
     inputs.t5xxl_filename = t5xxl_filename.encode("UTF-8")
     inputs.clip1_filename = clip1_filename.encode("UTF-8")
     inputs.clip2_filename = clip2_filename.encode("UTF-8")
     inputs.photomaker_filename = photomaker_filename.encode("UTF-8")
     inputs.upscaler_filename = upscaler_filename.encode("UTF-8")
+
+    lora_filenames = [l.encode("UTF-8") for l in lora_filenames[:lora_filenames_max] if l]
+    lora_len = len(lora_filenames)
+    lora_multipliers = args.sdloramult[:lora_len]
+    if len(lora_multipliers) < lora_len:
+        missing = lora_len - len(lora_multipliers)
+        if len(lora_multipliers) == 1:
+            # previous behavior: all get the same weight
+            lora_multipliers.extend(lora_multipliers * missing)
+        else:
+            lora_multipliers.extend([0.] * missing)
+    inputs.lora_len = lora_len
+    inputs.lora_filenames = (ctypes.c_char_p * lora_len)(*lora_filenames)
+    inputs.lora_multipliers = (ctypes.c_float * lora_len)(*lora_multipliers)
+    # auto if no zero-weight lora, dynamic otherwise
+    inputs.lora_apply_mode = 3 if 0. in inputs.lora_multipliers else 0
+
     inputs.img_hard_limit = args.sdclamped
     inputs.img_soft_limit = args.sdclampedsoft
-    inputs.lora_apply_mode = 0 #auto for now
     inputs = set_backend_props(inputs)
     ret = handle.sd_load_model(inputs)
     return ret
 
-def sd_oai_tranform_params(genparams):
-    size = genparams.get('size', "512x512")
-    if size and size!="":
-        pattern = r'^\D*(\d+)x(\d+)$'
-        match = re.fullmatch(pattern, size)
+def sd_oai_transform_params(genparams):
+    size = genparams.get('size') or ''
+    pattern = r'^\D*(\d+)x(\d+)$'
+    match = re.fullmatch(pattern, size)
     if match:
         width = int(match.group(1))
         height = int(match.group(2))
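To make the multiplier padding in the hunk above concrete, here is a standalone paraphrase (the helper name `pad_multipliers` is hypothetical; the real code operates inline on `args.sdloramult`):

    def pad_multipliers(multipliers, lora_len):
        # mirror of the sd_load_model logic above
        multipliers = multipliers[:lora_len]
        missing = lora_len - len(multipliers)
        if missing > 0:
            if len(multipliers) == 1:
                # previous behavior: all LoRAs get the same weight
                multipliers.extend(multipliers * missing)
            else:
                multipliers.extend([0.] * missing)
        return multipliers

    assert pad_multipliers([0.5], 3) == [0.5, 0.5, 0.5]          # a single value is broadcast
    assert pad_multipliers([0.5, 0.], 3) == [0.5, 0., 0.]        # shorter lists are zero-padded
    assert pad_multipliers([1., 1., 1., 1.], 3) == [1., 1., 1.]  # longer lists are truncated

Note that the broadcast only happens when exactly one multiplier was given; with two or more, the missing entries become 0 (and therefore runtime-adjustable).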
@@ -2111,6 +2123,69 @@ def sd_upscale(genparams):
     data_main = ret.data.decode("UTF-8","ignore")
     return data_main
 
+def sanitize_lora_multipliers(sdloramult):
+    if sdloramult is None:
+        sdloramult = [1.0]
+    elif not isinstance(sdloramult, list):
+        sdloramult = [sdloramult]
+    sdloramult = [tryparsefloat(m, 0.) for m in sdloramult]
+    return sdloramult
+
+def prepare_lora_multipliers(request_list):
+    orig_multipliers = [lora[3] for lora in imglorainfo]
+    req_by_path = {}
+    for r in request_list:
+        if not isinstance(r, dict):
+            continue
+        multiplier = tryparsefloat(r.get('multiplier'), 0.)
+        path = r.get('path')
+        if path and isinstance(path, str):
+            req_by_path[path] = req_by_path.get(path, 0.) + multiplier
+    result = []
+    for i, (fullpath, name, path, origmul) in enumerate(imglorainfo):
+        multiplier = orig_multipliers[i]
+        if multiplier == 0. and path in req_by_path:
+            multiplier = req_by_path[path]
+        result.append(multiplier)
+    return result
+
+def extract_loras_from_prompt(prompt):
+    pattern = r'<lora:([^:>]+):([^>]+)>'
+    lora_data = []
+    matches = list(re.finditer(pattern, prompt))
+    for match in matches:
+        raw_path = match.group(1)
+        raw_mul = match.group(2)
+        try:
+            mul = float(raw_mul)
+        except ValueError:
+            continue
+        is_high_noise = False
+        prefix = "|high_noise|"
+        if raw_path.startswith(prefix):
+            raw_path = raw_path[len(prefix):]
+            is_high_noise = True
+        item = {'name': raw_path, 'multiplier': mul}
+        if is_high_noise:
+            item["is_high_noise"] = is_high_noise
+        lora_data.append(item)
+        prompt = prompt.replace(match.group(0), "", 1)
+    return prompt, lora_data
+
+def lora_map_name_to_path(request_list):
+    name2path = {}
+    for _, name, path, _ in imglorainfo:
+        name2path[name] = path
+    result = []
+    for req in request_list:
+        out = dict(req)
+        name = out.pop('name')
+        path = name2path.get(name)
+        if path:
+            out['path'] = path
+            result.append(out)
+    return result
+
 def sd_generate(genparams):
     global maxctx, args, currentusergenkey, totalgens, pendingabortkey, chatcompl_adapter
 
@@ -2209,6 +2284,11 @@ def sd_generate(genparams):
     inputs.circular_x = tryparseint(adapter_obj.get("circular_x", genparams.get("circular_x",0)),0)
     inputs.circular_y = tryparseint(adapter_obj.get("circular_y", genparams.get("circular_y",0)),0)
     inputs.upscale = (True if tryparseint(genparams.get("enable_hr", 0),0) else False)
+
+    lora_multipliers = prepare_lora_multipliers(genparams.get("lora", []))
+    inputs.lora_len = len(lora_multipliers)
+    inputs.lora_multipliers = (ctypes.c_float * inputs.lora_len)(*lora_multipliers)
+
     ret = handle.sd_generate(inputs)
     data_main = ""
     data_extra = ""
@@ -4144,6 +4224,9 @@ Change Mode<br>
         elif clean_path.endswith('/v1/models') or clean_path=='/models':
             response_body = (json.dumps({"object":"list","data":[{"id":friendlymodelname,"object":"model","created":int(time.time()),"owned_by":"koboldcpp","permission":[],"root":"koboldcpp"}]}).encode())
 
+        elif clean_path.endswith('/sdapi/v1/loras'):
+            response_body = (json.dumps([{'name': name, 'path': path} for _, name, path, multiplier in imglorainfo if multiplier == 0.])).encode()
+
         elif clean_path.endswith('/sdapi/v1/upscalers'):
             if args.sdupscaler:
                 response_body = (json.dumps([{"name":"ESRGAN_4x","model_name":"ESRGAN_4x","model_path":"upscaler_model.gguf","model_url":None,"scale":4}]).encode())
@@ -5152,7 +5235,13 @@ Change Mode<br>
             lastgeneratedcomfyimg = b''
             genparams = sd_comfyui_tranform_params(genparams)
         elif is_oai_imggen:
-            genparams = sd_oai_tranform_params(genparams)
+            genparams = sd_oai_transform_params(genparams)
+        if not genparams.get('lora'):
+            # process <lora:name:multiplier> syntax
+            prompt, loras = extract_loras_from_prompt(genparams['prompt'])
+            if loras:
+                genparams['prompt'] = prompt
+                genparams['lora'] = lora_map_name_to_path(loras)
         gen = sd_generate(genparams)
         gendat = gen["data"]
        genanim = gen["animated"]
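For illustration, roughly what the new branch above does to an image request, assuming a runtime-adjustable LoRA whose sdapi name is "face" and whose path is "face.safetensors" (both names hypothetical):

    genparams = {"prompt": "a portrait <lora:face:0.8> with soft lighting"}
    # extract_loras_from_prompt strips the tag (leaving its surrounding spaces)
    # and parses the multiplier:
    #   prompt -> "a portrait  with soft lighting"
    #   loras  -> [{"name": "face", "multiplier": 0.8}]
    # lora_map_name_to_path then resolves sdapi names to loaded paths:
    #   genparams["lora"] -> [{"path": "face.safetensors", "multiplier": 0.8}]
    # sd_generate finally turns this into per-LoRA multipliers via
    # prepare_lora_multipliers, matching entries by path.

An explicit `lora` field in the request takes precedence; the prompt syntax is only parsed when that field is absent.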
@@ -6982,9 +7071,10 @@ def show_gui():
         args.sdquant = sd_quant_option(sd_quant_var.get())
         if sd_lora_var.get() != "":
             args.sdlora = [item.strip() for item in sd_lora_var.get().split("|") if item]
-            args.sdloramult = float(sd_loramult_var.get())
         else:
             args.sdlora = None
+        # XXX the user may have used '|' since it's used for the LoRAs
+        args.sdloramult = sanitize_lora_multipliers(re.split(r"[ |]+", sd_loramult_var.get()))
 
         if gen_defaults_var.get() != "":
             args.gendefaults = gen_defaults_var.get()
@@ -7243,7 +7333,7 @@ def show_gui():
             sd_lora_var.set(dict["sdlora"] if ("sdlora" in dict and dict["sdlora"]) else "")
         else:
             sd_lora_var.set("")
-        sd_loramult_var.set(str(dict["sdloramult"]) if ("sdloramult" in dict and dict["sdloramult"]) else "1.0")
+        sd_loramult_var.set(" ".join(f"{n:.3f}".rstrip('0').rstrip('.') for n in dict.get("sdloramult", [])))
         gen_defaults_var.set(dict["gendefaults"] if ("gendefaults" in dict and dict["gendefaults"]) else "")
         gen_defaults_overwrite_var.set(1 if "gendefaultsoverwrite" in dict and dict["gendefaultsoverwrite"] else 0)
@@ -7687,6 +7777,8 @@ def convert_invalid_args(args):
         dict["noflashattention"] = not dict["flashattention"]
     if "sdlora" in dict and isinstance(dict["sdlora"], str):
         dict["sdlora"] = ([dict["sdlora"]] if dict["sdlora"] else None)
+    if "sdloramult" in dict:
+        dict["sdloramult"] = sanitize_lora_multipliers(dict["sdloramult"])
     return args
 
 def setuptunnel(global_memory, has_sd):
@@ -8371,6 +8463,30 @@ def main(launch_args, default_args):
         print("Press ENTER key to exit.", flush=True)
         input()
 
+
+def mk_lora_info(imgloras, multipliers):
+    # (full path, name, name+extension, can change multiplier)
+    # XXX for each LoRA, sdapi needs a name and a path; we could use
+    # the full filename as a path, but we don't know if we can expose it
+    used_lora_names = set()
+    result = []
+    for i, lora_path in enumerate(imgloras):
+        multiplier = 0. if i >= len(multipliers) else multipliers[i]
+        lora_file = os.path.basename(lora_path)
+        lora_name, lora_ext = os.path.splitext(lora_file)
+        # ensure unique names
+        i = 1
+        mapped_name = lora_name
+        while True:
+            if mapped_name not in used_lora_names:
+                result.append((lora_path, mapped_name, mapped_name + lora_ext, multiplier))
+                used_lora_names.add(mapped_name)
+                break
+            i += 1
+            mapped_name = lora_name + '_' + str(i)
+    return result
+
+
 def kcpp_main_process(launch_args, g_memory=None, gui_launcher=False):
     global embedded_kailite, embedded_kcpp_docs, embedded_kcpp_sdui, embedded_kailite_gz, embedded_kcpp_docs_gz, embedded_kcpp_sdui_gz, embedded_lcpp_ui_gz, embedded_musicui, embedded_musicui_gz, start_time, exitcounter, global_memory, using_gui_launcher
     global libname, args, friendlymodelname, friendlysdmodelname, fullsdmodelpath, password, fullwhispermodelpath, ttsmodelpath, embeddingsmodelpath, musicdiffusionmodelpath, musicllmmodelpath, friendlyembeddingsmodelname, has_audio_support, has_vision_support, cached_chat_template
@@ -8820,6 +8936,9 @@ def kcpp_main_process(launch_args, g_memory=None, gui_launcher=False):
                 imgloras.append(os.path.abspath(curr))
             else:
                 print(f"Missing SD LORA model file {curr}...")
+    global imglorainfo
+    args.sdloramult = sanitize_lora_multipliers(args.sdloramult)
+    imglorainfo = mk_lora_info(imgloras, args.sdloramult)
     if args.sdvae:
         if os.path.exists(args.sdvae):
            imgvae = os.path.abspath(args.sdvae)
@@ -9415,7 +9534,7 @@ if __name__ == '__main__':
     sdparsergrouplora = sdparsergroup.add_mutually_exclusive_group()
     sdparsergrouplora.add_argument("--sdquant", metavar=('[quantization level 0/1/2]'), help="If specified, loads the model quantized to save memory. 0=off, 1=q8, 2=q4", type=int, choices=[0,1,2], nargs="?", const=2, default=0)
     sdparsergrouplora.add_argument("--sdlora", metavar=('[filename]'), help="Specify image generation LoRAs safetensors models to be applied. Multiple LoRAs are accepted.", nargs='+')
-    sdparsergroup.add_argument("--sdloramult", metavar=('[amount]'), help="Multiplier for the image LoRA model to be applied.", type=float, default=1.0)
+    sdparsergroup.add_argument("--sdloramult", metavar=('[amounts]'), help="Multipliers for the image LoRA model to be applied.", type=float, nargs='+', default=[1.0])
     sdparsergroup.add_argument("--sdtiledvae", metavar=('[maxres]'), help="Adjust the automatic VAE tiling trigger for images above this size. 0 disables vae tiling.", type=int, default=default_vae_tile_threshold)
     whisperparsergroup = parser.add_argument_group('Whisper Transcription Commands')
     whisperparsergroup.add_argument("--whispermodel", metavar=('[filename]'), help="Specify a Whisper .bin model to enable Speech-To-Text transcription.", default="")

(third changed file; its filename was not captured in this view)
@@ -80,8 +80,8 @@ struct SDParams {
     bool chroma_use_dit_mask = true;
 
     std::vector<std::string> lora_paths;
-    std::vector<sd_lora_t> lora_specs;
-    uint32_t lora_count;
+    std::vector<float> lora_multipliers;
+    bool lora_dynamic = false;
 };
 
 //shared
@@ -208,14 +208,12 @@ bool sdtype_load_model(const sd_load_model_inputs inputs) {
     set_sd_quiet(sd_is_quiet);
     executable_path = inputs.executable_path;
     std::string taesdpath = "";
-    std::vector<std::string> lorafilenames;
-    for(int i=0;i<lora_filenames_max;++i)
+    std::vector<std::string> lora_paths;
+    std::vector<float> lora_multipliers;
+    for(int i=0;i<inputs.lora_len;++i)
     {
-        std::string temp = inputs.lora_filenames[i];
-        if(temp!="")
-        {
-            lorafilenames.push_back(temp);
-        }
+        lora_paths.push_back(inputs.lora_filenames[i]);
+        lora_multipliers.push_back(inputs.lora_multipliers[i]);
     }
     std::string vaefilename = inputs.vae_filename;
     std::string t5xxl_filename = inputs.t5xxl_filename;
@@ -230,19 +228,28 @@ bool sdtype_load_model(const sd_load_model_inputs inputs) {
     cfg_square_limit = inputs.img_soft_limit;
     printf("\nImageGen Init - Load Model: %s\n",inputs.model_filename);
 
-    int lora_apply_mode = std::max(0, std::min(2, inputs.lora_apply_mode));
+    int lora_apply_mode = LORA_APPLY_AT_RUNTIME;
+    bool lora_dynamic = false;
+    if(inputs.lora_apply_mode >= 0 && inputs.lora_apply_mode <= 2) {
+        lora_apply_mode = inputs.lora_apply_mode;
+    }
+    else if(inputs.lora_apply_mode == 3) {
+        lora_dynamic = true;
+    }
 
-    if(lorafilenames.size()>0)
+    if(lora_paths.size() > 0)
     {
-        for(int i=0;i<lorafilenames.size();++i)
+        const char* lora_apply_mode_name = lora_apply_mode == 1 ? "immediately"
+                                         : lora_apply_mode == 2 ? "at runtime"
+                                         : "auto";
+        const char * lora_dynamic_name = lora_dynamic ? " (dynamic)" : "";
+        printf("With LoRAs in apply mode %s%s:\n", lora_apply_mode_name, lora_dynamic_name);
+        for(int i=0;i<lora_paths.size();++i)
        {
-            const char* lora_apply_mode_name = lora_apply_mode == 1 ? "immediately"
-                                             : lora_apply_mode == 2 ? "at runtime"
-                                             : "auto";
-            printf("With LoRA: %s at %f power, apply mode: %s\n",
-                lorafilenames[i].c_str(),inputs.lora_multiplier,lora_apply_mode_name);
+            printf(" %s at %f power\n", lora_paths[i].c_str(),lora_multipliers[i]);
         }
     }
 
     if(inputs.taesd)
     {
         taesdpath = executable_path + "embd_res/taesd.embd";
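A note on the mode handshake between the two sides, as implied by the hunks above: the Python loader sends `lora_apply_mode` values 0 through 2 to select stable-diffusion.cpp's own apply modes (0 = auto, 1 = immediately, 2 = at runtime), and sends 3 when any LoRA was loaded with a zero multiplier; the C++ side then keeps `lora_apply_mode` at `LORA_APPLY_AT_RUNTIME` and raises `lora_dynamic` instead, so per-generation multipliers from the sdapi can be applied without reloading files.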
@@ -327,7 +334,9 @@ bool sdtype_load_model(const sd_load_model_inputs inputs) {
     sd_params->clip_l_path = clip1_filename;
     sd_params->clip_g_path = clip2_filename;
     sd_params->stacked_id_embeddings_path = photomaker_filename;
-    sd_params->lora_paths = lorafilenames;
+    sd_params->lora_paths = lora_paths;
+    sd_params->lora_multipliers = lora_multipliers;
+    sd_params->lora_dynamic = lora_dynamic;
     //if t5 is set, and model is a gguf, load it as a diffusion model path
     bool endswithgguf = (sd_params->model_path.rfind(".gguf") == sd_params->model_path.size() - 5);
     if((sd_params->t5xxl_path!="" || sd_params->clip_l_path!="" || sd_params->clip_g_path!="") && endswithgguf)
@@ -416,21 +425,22 @@ bool sdtype_load_model(const sd_load_model_inputs inputs) {
     std::filesystem::path mpath(inputs.model_filename);
     sdmodelfilename = mpath.filename().string();
 
-    sd_params->lora_specs.clear();
-    sd_params->lora_specs.reserve(lora_filenames_max*2);
+    // preload the LoRAs with the initial multipliers
+    std::vector<sd_lora_t> lora_specs;
     for(int i=0;i<sd_params->lora_paths.size();++i)
     {
+        if (!lora_dynamic && sd_params->lora_multipliers[i] == 0.)
+            continue;
         sd_lora_t spec = {};
         spec.path = sd_params->lora_paths[i].c_str();
-        spec.multiplier = inputs.lora_multiplier;
-        sd_params->lora_specs.push_back(spec);
+        spec.multiplier = sd_params->lora_multipliers[i];
+        lora_specs.push_back(spec);
     }
 
-    if(sd_params->lora_specs.size()>0 && inputs.lora_multiplier>0)
+    if(lora_specs.size()>0)
     {
-        printf("\nApply %zu LoRAs...\n",sd_params->lora_specs.size());
-        sd_params->lora_count = sd_params->lora_specs.size();
-        sd_ctx->sd->apply_loras(sd_params->lora_specs.data(), sd_params->lora_count);
+        printf(" applying %zu LoRAs...\n", lora_specs.size());
+        sd_ctx->sd->apply_loras(lora_specs.data(), lora_specs.size());
     }
 
     input_extraimage_buffers.reserve(max_extra_images);
@@ -478,10 +488,10 @@ static std::string get_scheduler_name(scheduler_t scheduler, bool as_sampler_suf
     }
 }
 
-static std::string get_image_params(const sd_img_gen_params_t & params) {
+static std::string get_image_params(const sd_img_gen_params_t & params, const std::string& lora_meta) {
     std::stringstream ss;
     ss << std::setprecision(3)
-       << "Prompt: " << params.prompt
+       << "Prompt: " << params.prompt << lora_meta
        << " | NegativePrompt: " << params.negative_prompt
       << " | Steps: " << params.sample_params.sample_steps
       << " | CFGScale: " << params.sample_params.guidance.txt_cfg
@@ -1034,10 +1044,38 @@ sd_generation_outputs sdtype_generate(const sd_generation_inputs inputs)
     params.vae_tiling_params.enabled = dotile;
     params.batch_count = 1;
 
-    // needs to be "reapplied" because sdcpp tracks previously applied LoRAs
-    // and weights, and apply/unapply the differences at each gen
-    params.loras = sd_params->lora_specs.data();
-    params.lora_count = sd_params->lora_count;
+    std::vector<sd_lora_t> lora_specs;
+    std::stringstream lora_meta;
+    lora_meta << std::setprecision(6);
+    for(size_t i=0;i<sd_params->lora_paths.size();++i)
+    {
+        float multiplier = sd_params->lora_multipliers[i];
+        if (sd_params->lora_dynamic) {
+            multiplier = i < inputs.lora_len ? inputs.lora_multipliers[i] : 0.;
+        }
+        if (multiplier != 0.f) {
+            sd_lora_t spec = {};
+            spec.path = sd_params->lora_paths[i].c_str();
+            spec.multiplier = multiplier;
+            lora_specs.push_back(spec);
+            std::string lora_name = std::filesystem::path(sd_params->lora_paths[i]).stem();
+            lora_meta << "<lora:" << lora_name << ":" << multiplier << ">";
+        }
+    }
+    if(!sd_is_quiet && sddebugmode==1) {
+        if (lora_specs.size() > 0) {
+            printf("Applying LoRAs:\n");
+            for(size_t i=0;i<lora_specs.size();++i)
+            {
+                printf(" %s @ %.3f\n", lora_specs[i].path, lora_specs[i].multiplier);
+            }
+        }
+    }
+
+    // note sdcpp tracks previously applied LoRAs and weights,
+    // and apply/unapply the differences at each gen
+    params.loras = lora_specs.data();
+    params.lora_count = lora_specs.size();
 
     params.ref_images = reference_imgs.data();
     params.ref_images_count = reference_imgs.size();
@@ -1264,9 +1302,9 @@ sd_generation_outputs sdtype_generate(const sd_generation_inputs inputs)
     {
         printf("Upscaling output image...\n");
         upscaled_image = upscale(upscaler_ctx, results[i], 2);
-        png = stbi_write_png_to_mem(upscaled_image.data, 0, upscaled_image.width, upscaled_image.height, upscaled_image.channel, &out_data_len, get_image_params(params).c_str());
+        png = stbi_write_png_to_mem(upscaled_image.data, 0, upscaled_image.width, upscaled_image.height, upscaled_image.channel, &out_data_len, get_image_params(params, lora_meta.str()).c_str());
     } else {
-        png = stbi_write_png_to_mem(results[i].data, 0, results[i].width, results[i].height, results[i].channel, &out_data_len, get_image_params(params).c_str());
+        png = stbi_write_png_to_mem(results[i].data, 0, results[i].width, results[i].height, results[i].channel, &out_data_len, get_image_params(params, lora_meta.str()).c_str());
     }
 
     if (png != NULL)

(fourth changed file; its filename was not captured in this view)
@@ -134,6 +134,7 @@ public:
     std::vector<std::shared_ptr<LoraModel>> diffusion_lora_models;
     std::vector<std::shared_ptr<LoraModel>> first_stage_lora_models;
     bool apply_lora_immediately = false;
+    std::map<std::string, std::shared_ptr<LoraModel>> kcpp_lora_cache;
 
     std::string taesd_path;
     bool use_tiny_autoencoder = false;
@@ -1193,7 +1194,23 @@ public:
     std::shared_ptr<LoraModel> load_lora_model_from_file(const std::string& lora_id,
                                                          float multiplier,
                                                          ggml_backend_t backend,
+                                                         std::string stage = "",
                                                          LoraModel::filter_t lora_tensor_filter = nullptr) {
+        // kcpp
+        // first check the cache
+        bool kcpp_at_runtime = (stage != "");
+        std::string lora_key = "|" + stage + "|" + lora_id;
+        if (kcpp_at_runtime) {
+            auto it = kcpp_lora_cache.find(lora_key);
+            if (it != kcpp_lora_cache.end()) {
+                if (it->second) {
+                    it->second->multiplier = multiplier;
+                }
+                return it->second;
+            }
+        }
+        // by construction, kcpp will always find the preloaded LoRAs on the cache
+
         std::string lora_path = lora_id;
         static std::string high_noise_tag = "|high_noise|";
         bool is_high_noise = false;
@@ -1205,10 +1222,16 @@ public:
         auto lora = std::make_shared<LoraModel>(lora_id, backend, lora_path, is_high_noise ? "model.high_noise_" : "", version);
         if (!lora->load_from_file(n_threads, lora_tensor_filter)) {
             LOG_WARN("load lora tensors from %s failed", lora_path.c_str());
-            return nullptr;
+            // also cache negatives to avoid I/O at runtime
+            lora = nullptr;
+            if (kcpp_at_runtime)
+                kcpp_lora_cache[lora_key] = lora;
+            return lora;
         }
 
         lora->multiplier = multiplier;
+        if (kcpp_at_runtime)
+            kcpp_lora_cache[lora_key] = lora;
         return lora;
     }
 
@@ -1299,7 +1322,7 @@ public:
             const std::string& lora_id = kv.first;
             float multiplier = kv.second;
 
-            auto lora = load_lora_model_from_file(lora_id, multiplier, clip_backend, lora_tensor_filter);
+            auto lora = load_lora_model_from_file(lora_id, multiplier, clip_backend, "cond_stage", lora_tensor_filter);
             if (lora && !lora->lora_tensors.empty()) {
                 lora->preprocess_lora_tensors(tensors);
                 cond_stage_lora_models.push_back(lora);
@@ -1331,7 +1354,7 @@ public:
             const std::string& lora_name = kv.first;
             float multiplier = kv.second;
 
-            auto lora = load_lora_model_from_file(lora_name, multiplier, backend, lora_tensor_filter);
+            auto lora = load_lora_model_from_file(lora_name, multiplier, backend, "diffusion", lora_tensor_filter);
             if (lora && !lora->lora_tensors.empty()) {
                 lora->preprocess_lora_tensors(tensors);
                 diffusion_lora_models.push_back(lora);
@@ -1367,7 +1390,7 @@ public:
             const std::string& lora_name = kv.first;
             float multiplier = kv.second;
 
-            auto lora = load_lora_model_from_file(lora_name, multiplier, vae_backend, lora_tensor_filter);
+            auto lora = load_lora_model_from_file(lora_name, multiplier, vae_backend, "first_stage", lora_tensor_filter);
             if (lora && !lora->lora_tensors.empty()) {
                 lora->preprocess_lora_tensors(tensors);
                 first_stage_lora_models.push_back(lora);
tests/test_koboldcpp.py (new file, 82 lines)

@@ -0,0 +1,82 @@
+import sys
+import os
+
+parent_dir = os.path.abspath(os.path.join(__file__, "..", ".."))
+sys.path.append(parent_dir)
+
+import koboldcpp
+
+def extract_loras_from_prompt(*args, **kwargs):
+    """
+    >>> prompt = "no <lora: tag, even though with a : and 0> it could look like it"
+    >>> clean, data = extract_loras_from_prompt(prompt)
+    >>> clean
+    'no <lora: tag, even though with a : and 0> it could look like it'
+    >>> data
+    []
+
+    >>> prompt = "even after a <lora:valid:1> tag, an unending <lora: tag should be ignored"
+    >>> clean, data = extract_loras_from_prompt(prompt)
+    >>> clean
+    'even after a  tag, an unending <lora: tag should be ignored'
+    >>> data
+    [{'name': 'valid', 'multiplier': 1.0}]
+
+    >>> prompt = "A portrait <lora:models/face:0.8> with soft lighting"
+    >>> clean, data = extract_loras_from_prompt(prompt)
+    >>> clean
+    'A portrait  with soft lighting'
+    >>> data
+    [{'name': 'models/face', 'multiplier': 0.8}]
+
+    >>> prompt = "<lora:foo:1.0> start <lora:|high_noise|bar:0.5> end"
+    >>> clean, data = extract_loras_from_prompt(prompt)
+    >>> clean
+    ' start  end'
+    >>> data
+    [{'name': 'foo', 'multiplier': 1.0}, {'name': 'bar', 'multiplier': 0.5, 'is_high_noise': True}]
+
+    >>> prompt = "bad <lora:bad:abc> good <lora:good:2>"
+    >>> clean, data = extract_loras_from_prompt(prompt)
+    >>> clean
+    'bad <lora:bad:abc> good '
+    >>> data
+    [{'name': 'good', 'multiplier': 2.0}]
+
+    >>> prompt = "x<lora:a:0.15>y<lora:b:0.2>z"
+    >>> clean, data = extract_loras_from_prompt(prompt)
+    >>> clean
+    'xyz'
+    >>> data
+    [{'name': 'a', 'multiplier': 0.15}, {'name': 'b', 'multiplier': 0.2}]
+    """
+    return koboldcpp.extract_loras_from_prompt(*args, **kwargs)
+
+def sanitize_lora_multipliers(*args, **kwargs):
+    """
+    >>> sanitize_lora_multipliers(None)
+    [1.0]
+
+    >>> sanitize_lora_multipliers(0.75)
+    [0.75]
+    >>> sanitize_lora_multipliers("2")
+    [2.0]
+
+    >>> sanitize_lora_multipliers([0.5, "1.2", 3])
+    [0.5, 1.2, 3.0]
+
+    >>> sanitize_lora_multipliers([])
+    []
+
+    >>> sanitize_lora_multipliers(["bad", None, ""])
+    [0.0, 0.0, 0.0]
+    """
+    return koboldcpp.sanitize_lora_multipliers(*args, **kwargs)
+
+if __name__ == '__main__':
+    import doctest
+    failures, _ = doctest.testmod()
+    if failures:
+        raise SystemExit(f"{failures} doctest{'s' if failures != 1 else ''} failed")
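The new tests are plain doctests with a small __main__ harness, so they can be run directly from the repository root (or collected by any doctest-aware runner):

    python tests/test_koboldcpp.py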