chroma support is now usable

Concedo 2025-06-08 18:53:59 +08:00
parent 30cf433ab4
commit 2d4c1aa5a0
3 changed files with 20 additions and 9 deletions


@@ -201,9 +201,9 @@ bool sdtype_load_model(const sd_load_model_inputs inputs) {
sd_params->t5xxl_path = t5xxl_filename;
sd_params->clip_l_path = clipl_filename;
sd_params->clip_g_path = clipg_filename;
-//if clip and t5 is set, and model is a gguf, load it as a diffusion model path
+//if t5 is set, and model is a gguf, load it as a diffusion model path
bool endswithgguf = (sd_params->model_path.rfind(".gguf") == sd_params->model_path.size() - 5);
-if(sd_params->clip_l_path!="" && sd_params->t5xxl_path!="" && endswithgguf)
+if(sd_params->t5xxl_path!="" && endswithgguf)
{
printf("\nSwap to Diffusion Model Path:%s",sd_params->model_path.c_str());
sd_params->diffusion_model_path = sd_params->model_path;
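The relaxed condition above means a .gguf checkpoint plus a T5-XXL encoder is now enough to trigger the diffusion-model path swap; a CLIP-L file is no longer required, which fits Chroma setups that only ship a T5 text encoder. For illustration, the .gguf test is a plain suffix comparison; an equivalent stand-alone helper (hypothetical, not part of this commit) could be:

#include <string>

// Hypothetical helper equivalent to the inline ".gguf" suffix check above.
static bool ends_with(const std::string& s, const std::string& suffix) {
    return s.size() >= suffix.size() &&
           s.compare(s.size() - suffix.size(), suffix.size(), suffix) == 0;
}

// usage: bool endswithgguf = ends_with(sd_params->model_path, ".gguf");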
@@ -342,11 +342,12 @@ sd_generation_outputs sdtype_generate(const sd_generation_inputs inputs)
//ensure unsupported dimensions are fixed
int biggestdim = (sd_params->width>sd_params->height?sd_params->width:sd_params->height);
auto loadedsdver = get_loaded_sd_version(sd_ctx);
-if(loadedsdver==SDVersion::VERSION_FLUX)
-{
-sd_params->cfg_scale = 1;
-if(sampler=="euler a"||sampler=="k_euler_a"||sampler=="euler_a")
-{
+if (loadedsdver == SDVersion::VERSION_FLUX)
+{
+if (!sd_loaded_chroma()) {
+sd_params->cfg_scale = 1; //non chroma clamp cfg scale
+}
+if (sampler == "euler a" || sampler == "k_euler_a" || sampler == "euler_a") {
sampler = "euler"; //euler a broken on flux
}
}
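On the generation side, the Flux handling now distinguishes Chroma: the forced cfg_scale of 1 is applied only to non-Chroma Flux models, while the euler-a to euler fallback is kept for both. A minimal sketch of how a caller could consume the new query, assuming the public header is included as "stable-diffusion.h" and that SDVersion is visible as it is in the adapter code above; pick_default_cfg is a hypothetical helper, not part of this commit:

#include "stable-diffusion.h" // assumed header name for the declarations added below

// Hypothetical: choose a default CFG scale for the currently loaded model,
// mirroring the clamp in the hunk above.
static float pick_default_cfg(sd_ctx_t* ctx, float requested) {
    if (get_loaded_sd_version(ctx) == SDVersion::VERSION_FLUX && !sd_loaded_chroma()) {
        return 1.0f; // plain (non-Chroma) Flux: clamp CFG
    }
    return requested; // Chroma and non-Flux models keep the requested value
}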


@@ -22,6 +22,7 @@
#include <cinttypes>
static std::string pending_apply_lora_fname = "";
static float pending_apply_lora_power = 1.0f;
+static bool is_loaded_chroma = false;
const char* model_version_to_str[] = {
"SD 1.x",
@@ -161,6 +162,7 @@ public:
bool diffusion_flash_attn) {
use_tiny_autoencoder = taesd_path.size() > 0;
std::string taesd_path_fixed = taesd_path;
+is_loaded_chroma = false;
#ifdef SD_USE_CUDA
LOG_DEBUG("Using CUDA backend");
backend = ggml_backend_cuda_init(0);
@@ -236,7 +238,7 @@ public:
version = model_loader.get_sd_version();
-if (version == VERSION_COUNT && model_path.size() > 0 && clip_l_path.size() > 0 && diffusion_model_path.size() == 0 && t5xxl_path.size() > 0) {
+if (version == VERSION_COUNT && model_path.size() > 0 && diffusion_model_path.size() == 0 && t5xxl_path.size() > 0) {
bool endswithsafetensors = (model_path.rfind(".safetensors") == model_path.size() - 12);
if(endswithsafetensors && !model_loader.has_diffusion_model_tensors())
{
@@ -377,6 +379,7 @@ public:
for (auto pair : model_loader.tensor_storages_types) {
if (pair.first.find("distilled_guidance_layer.in_proj.weight") != std::string::npos) {
is_chroma = true;
+is_loaded_chroma = true;
break;
}
}
@@ -1202,6 +1205,12 @@ int get_loaded_sd_version(sd_ctx_t* ctx)
return ctx->sd->version;
}
+//kcpp hack to check if chroma
+bool sd_loaded_chroma()
+{
+return is_loaded_chroma;
+}
sd_ctx_t* new_sd_ctx(const char* model_path_c_str,
const char* clip_l_path_c_str,
const char* clip_g_path_c_str,
@@ -1655,7 +1664,7 @@ sd_image_t* txt2img(sd_ctx_t* sd_ctx,
params.mem_size *= 2; //readjust by kcpp as above changed
}
if (sd_version_is_flux(sd_ctx->sd->version)) {
-params.mem_size *= 2; //readjust by kcpp as above changed
+params.mem_size *= 3; //readjust by kcpp as above changed
}
if (sd_ctx->sd->stacked_id) {
params.mem_size += static_cast<size_t>(15 * 1024 * 1024); // 10 MB
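Taken together, the library-side changes keep a static is_loaded_chroma flag: it is reset when model loading starts, set when the tensor listing contains distilled_guidance_layer.in_proj.weight (the Chroma-specific guidance layer), and exposed through the new sd_loaded_chroma() accessor; the Flux compute-buffer estimate is also raised from 2x to 3x. For illustration, the detection boils down to a name scan like this stand-alone sketch, where tensor_names stands in for the real model_loader.tensor_storages_types entries:

#include <string>
#include <vector>

// Stand-alone sketch of the Chroma check above: a model is treated as Chroma
// when its tensor list contains a distilled_guidance_layer.in_proj.weight entry.
static bool looks_like_chroma(const std::vector<std::string>& tensor_names) {
    for (const auto& name : tensor_names) {
        if (name.find("distilled_guidance_layer.in_proj.weight") != std::string::npos) {
            return true;
        }
    }
    return false;
}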


@@ -131,6 +131,7 @@ typedef struct sd_ctx_t sd_ctx_t;
SD_API void set_sd_vae_tiling(sd_ctx_t* ctx, bool tiling);
SD_API int get_loaded_sd_version(sd_ctx_t* ctx);
+SD_API bool sd_loaded_chroma();
SD_API sd_ctx_t* new_sd_ctx(const char* model_path,
const char* clip_l_path,