updated sdcpp, also set euler as default sampler

Concedo 2024-12-01 17:00:20 +08:00
parent e93c2427b4
commit 2ba5949054
27 changed files with 1514 additions and 521 deletions

File diff suppressed because one or more lines are too long

View file

@@ -4406,7 +4406,7 @@ Current version indicated by LITEVER below.
 img_img2imgstr: 0.6,
 img_clipskip: -1,
 img_steps: 20,
-img_sampler: "Euler a",
+img_sampler: "Euler",
 img_aspect:0, //0=square,1=portrait,2=landscape,3=bigsquare
 save_images: true,
 save_remote_images: false,
@@ -19698,8 +19698,8 @@ Current version indicated by LITEVER below.
 <div class="inlinelabel">
 <div class="justifyleft" style="padding:4px">Sampler: </div>
 <select title="Image Sampler" style="padding:1px; font-size:12px; height:20px; width: 100px;" class="form-control" id="img_sampler">
-<option value="Euler a">Euler A</option>
 <option value="Euler">Euler</option>
+<option value="Euler a">Euler A</option>
 <option value="Heun">Heun</option>
 <option value="DPM2">DPM2</option>
 <option value="LCM">LCM</option>

View file

@@ -56,7 +56,7 @@ maxhordelen = 400
 modelbusy = threading.Lock()
 requestsinqueue = 0
 defaultport = 5001
-KcppVersion = "1.79.1"
+KcppVersion = "1.80"
 showdebug = True
 guimode = False
 showsamplerwarning = True
@@ -2014,7 +2014,7 @@ Enter Prompt:<br>
 if friendlysdmodelname=="inactive" or fullsdmodelpath=="":
     response_body = (json.dumps([]).encode())
 else:
-    response_body = (json.dumps([{"name":"Euler a","aliases":["k_euler_a","k_euler_ancestral"],"options":{}},{"name":"Euler","aliases":["k_euler"],"options":{}},{"name":"Heun","aliases":["k_heun"],"options":{}},{"name":"DPM2","aliases":["k_dpm_2"],"options":{}},{"name":"DPM++ 2M","aliases":["k_dpmpp_2m"],"options":{}},{"name":"LCM","aliases":["k_lcm"],"options":{}}]).encode())
+    response_body = (json.dumps([{"name":"Euler","aliases":["k_euler"],"options":{}},{"name":"Euler a","aliases":["k_euler_a","k_euler_ancestral"],"options":{}},{"name":"Heun","aliases":["k_heun"],"options":{}},{"name":"DPM2","aliases":["k_dpm_2"],"options":{}},{"name":"DPM++ 2M","aliases":["k_dpmpp_2m"],"options":{}},{"name":"LCM","aliases":["k_lcm"],"options":{}}]).encode())
 elif self.path.endswith('/sdapi/v1/latent-upscale-modes'):
     response_body = (json.dumps([]).encode())
 elif self.path.endswith('/sdapi/v1/upscalers'):
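For reference, the sampler names advertised by this endpoint are the ones the bundled stable-diffusion.cpp backend understands. A hedged sketch of how such names could be resolved to sd.cpp's `sample_method_t` enum — the enum values come from stable-diffusion.h, but this helper itself is illustrative, not KoboldCpp's actual dispatch code:

```cpp
#include <string>
#include "stable-diffusion.h"  // sample_method_t

// Hypothetical helper: map an A1111-style sampler name to sd.cpp's enum.
// Unknown names fall back to Euler, the new default as of this commit.
static sample_method_t sampler_from_name(const std::string& name) {
    if (name == "Euler a")  return EULER_A;
    if (name == "Heun")     return HEUN;
    if (name == "DPM2")     return DPM2;
    if (name == "DPM++ 2M") return DPMPP2M;
    if (name == "LCM")      return LCM;
    return EULER;  // default sampler
}
```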

View file

@@ -343,6 +343,13 @@ public:
         }
     }

+    std::string clean_up_tokenization(std::string& text) {
+        std::regex pattern(R"( ,)");
+        // Replace " ," with ","
+        std::string result = std::regex_replace(text, pattern, ",");
+        return result;
+    }
+
     std::string decode(const std::vector<int>& tokens) {
         std::string text = "";
         for (int t : tokens) {
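The new `clean_up_tokenization` pass is a single `std::regex_replace`. A minimal standalone sketch of the same idea (the input string is made up):

```cpp
#include <iostream>
#include <regex>
#include <string>

int main() {
    // Decoded CLIP token streams can end up with a space before commas,
    // e.g. "a cat , sitting"; the cleanup collapses " ," into ",".
    std::string text    = "a cat , sitting , on a mat";
    std::string cleaned = std::regex_replace(text, std::regex(R"( ,)"), ",");
    std::cout << cleaned << "\n";  // prints: a cat, sitting, on a mat
}
```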
@@ -351,8 +358,12 @@ public:
             std::u32string ts = decoder[t];
             // printf("%d, %s \n", t, utf32_to_utf8(ts).c_str());
             std::string s = utf32_to_utf8(ts);
-            if (s.length() >= 4 && ends_with(s, "</w>")) {
-                text += " " + s.replace(s.length() - 4, s.length() - 1, "");
+            if (s.length() >= 4) {
+                if (ends_with(s, "</w>")) {
+                    text += s.replace(s.length() - 4, s.length() - 1, "") + " ";
+                } else {
+                    text += s;
+                }
             } else {
                 text += " " + s;
             }
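The decode change above moves the word boundary: `</w>` marks the end of a word in the CLIP BPE vocabulary, so the space is now emitted after the stripped word rather than before it, and marker-less pieces are joined with no space at all. A standalone sketch (the sample pieces are invented):

```cpp
#include <iostream>
#include <string>
#include <vector>

int main() {
    // "to" and "ken" continue one word; "ize</w>" closes it and emits a space.
    std::vector<std::string> pieces = {"to", "ken", "ize</w>", "fast</w>"};
    std::string text;
    for (const std::string& s : pieces) {
        if (s.size() >= 4 && s.compare(s.size() - 4, 4, "</w>") == 0) {
            text += s.substr(0, s.size() - 4) + " ";  // word boundary after the word
        } else {
            text += s;  // continuation piece, no space
        }
    }
    std::cout << text << "\n";  // prints: tokenize fast
}
```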
@@ -364,6 +375,7 @@ public:
         // std::string s((char *)bytes.data());
         // std::string s = "";
+        text = clean_up_tokenization(text);
         return trim(text);
     }

@@ -533,9 +545,12 @@ protected:
     int64_t vocab_size;
     int64_t num_positions;

-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["token_embedding.weight"]    = ggml_new_tensor_2d(ctx, wtype, embed_dim, vocab_size);
-        params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, embed_dim, num_positions);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type token_wtype    = (tensor_types.find(prefix + "token_embedding.weight") != tensor_types.end()) ? tensor_types[prefix + "token_embedding.weight"] : GGML_TYPE_F32;
+        enum ggml_type position_wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "position_embedding.weight") != tensor_types.end()) ? tensor_types[prefix + "position_embedding.weight"] : GGML_TYPE_F32;
+        params["token_embedding.weight"]    = ggml_new_tensor_2d(ctx, token_wtype, embed_dim, vocab_size);
+        params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, position_wtype, embed_dim, num_positions);
     }

 public:
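All the `init_params` overloads converted in this commit share one pattern: look up the fully qualified tensor name in the loader's per-tensor type map and fall back to a fixed type when the entry is missing (or when a quantized type is deliberately not honored, as with position embeddings). The lookup in isolation, as a hypothetical helper:

```cpp
#include <map>
#include <string>
#include "ggml.h"

// Sketch of the lookup pattern above (the helper name is mine, not the library's):
// pick the type recorded for a tensor in the checkpoint, else a fallback.
static ggml_type type_for(const std::map<std::string, ggml_type>& tensor_types,
                          const std::string& prefix,
                          const std::string& name,
                          ggml_type fallback = GGML_TYPE_F32) {
    auto it = tensor_types.find(prefix + name);
    return it != tensor_types.end() ? it->second : fallback;
}
```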
@@ -579,11 +594,14 @@ protected:
     int64_t image_size;
     int64_t num_patches;
     int64_t num_positions;

-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["patch_embedding.weight"]    = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, patch_size, patch_size, num_channels, embed_dim);
-        params["class_embedding"]           = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, embed_dim);
-        params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, embed_dim, num_positions);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type patch_wtype    = GGML_TYPE_F16;  // tensor_types.find(prefix + "patch_embedding.weight") != tensor_types.end() ? tensor_types[prefix + "patch_embedding.weight"] : GGML_TYPE_F16;
+        enum ggml_type class_wtype    = GGML_TYPE_F32;  // tensor_types.find(prefix + "class_embedding") != tensor_types.end() ? tensor_types[prefix + "class_embedding"] : GGML_TYPE_F32;
+        enum ggml_type position_wtype = GGML_TYPE_F32;  // tensor_types.find(prefix + "position_embedding.weight") != tensor_types.end() ? tensor_types[prefix + "position_embedding.weight"] : GGML_TYPE_F32;
+        params["patch_embedding.weight"]    = ggml_new_tensor_4d(ctx, patch_wtype, patch_size, patch_size, num_channels, embed_dim);
+        params["class_embedding"]           = ggml_new_tensor_1d(ctx, class_wtype, embed_dim);
+        params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, position_wtype, embed_dim, num_positions);
     }

 public:

@@ -639,9 +657,10 @@ enum CLIPVersion {
 class CLIPTextModel : public GGMLBlock {
 protected:
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
         if (version == OPEN_CLIP_VIT_BIGG_14) {
-            params["text_projection"] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, projection_dim, hidden_size);
+            enum ggml_type wtype      = GGML_TYPE_F32;  // tensor_types.find(prefix + "text_projection") != tensor_types.end() ? tensor_types[prefix + "text_projection"] : GGML_TYPE_F32;
+            params["text_projection"] = ggml_new_tensor_2d(ctx, wtype, projection_dim, hidden_size);
         }
     }

@@ -711,8 +730,12 @@ public:
         if (return_pooled) {
             auto text_projection = params["text_projection"];
             ggml_tensor* pooled  = ggml_view_1d(ctx, x, hidden_size, x->nb[1] * max_token_idx);
-            pooled = ggml_mul_mat(ctx, ggml_cont(ctx, ggml_transpose(ctx, text_projection)), pooled);
-            return pooled;
+            if (text_projection != NULL) {
+                pooled = ggml_nn_linear(ctx, pooled, text_projection, NULL);
+            } else {
+                LOG_DEBUG("Missing text_projection matrix, assuming identity...");
+            }
+            return pooled;  // [hidden_size, 1, 1]
         }
         return x;  // [N, n_token, hidden_size]

@@ -761,6 +784,8 @@ public:
         auto x = embeddings->forward(ctx, pixel_values);  // [N, num_positions, embed_dim]
         x      = pre_layernorm->forward(ctx, x);
         x      = encoder->forward(ctx, x, -1, false);
+        // print_ggml_tensor(x, true, "ClipVisionModel x: ");
+        auto last_hidden_state = x;
         x = post_layernorm->forward(ctx, x);  // [N, n_token, hidden_size]

         GGML_ASSERT(x->ne[3] == 1);

@@ -768,7 +793,8 @@ public:
             ggml_tensor* pooled = ggml_cont(ctx, ggml_view_2d(ctx, x, x->ne[0], x->ne[2], x->nb[2], 0));
             return pooled;  // [N, hidden_size]
         } else {
-            return x;  // [N, n_token, hidden_size]
+            // return x; // [N, n_token, hidden_size]
+            return last_hidden_state;  // [N, n_token, hidden_size]
         }
     }
 };

@@ -779,9 +805,9 @@ protected:
     int64_t out_features;
     bool transpose_weight;

-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type wtype = tensor_types.find(prefix + "weight") != tensor_types.end() ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
         if (transpose_weight) {
-            LOG_ERROR("transpose_weight");
             params["weight"] = ggml_new_tensor_2d(ctx, wtype, out_features, in_features);
         } else {
             params["weight"] = ggml_new_tensor_2d(ctx, wtype, in_features, out_features);

@@ -842,12 +868,13 @@ struct CLIPTextModelRunner : public GGMLRunner {
     CLIPTextModel model;

     CLIPTextModelRunner(ggml_backend_t backend,
-                        ggml_type wtype,
+                        std::map<std::string, enum ggml_type>& tensor_types,
+                        const std::string prefix,
                         CLIPVersion version = OPENAI_CLIP_VIT_L_14,
                         int clip_skip_value = 1,
                         bool with_final_ln  = true)
-        : GGMLRunner(backend, wtype), model(version, clip_skip_value, with_final_ln) {
-        model.init(params_ctx, wtype);
+        : GGMLRunner(backend), model(version, clip_skip_value, with_final_ln) {
+        model.init(params_ctx, tensor_types, prefix);
     }

     std::string get_desc() {

@@ -889,13 +916,13 @@ struct CLIPTextModelRunner : public GGMLRunner {
         struct ggml_tensor* embeddings = NULL;

         if (num_custom_embeddings > 0 && custom_embeddings_data != NULL) {
+            auto token_embed_weight = model.get_token_embed_weight();
             auto custom_embeddings  = ggml_new_tensor_2d(compute_ctx,
-                                                         wtype,
+                                                         token_embed_weight->type,
                                                          model.hidden_size,
                                                          num_custom_embeddings);
             set_backend_tensor_data(custom_embeddings, custom_embeddings_data);

-            auto token_embed_weight = model.get_token_embed_weight();
             // concatenate custom embeddings
             embeddings = ggml_concat(compute_ctx, token_embed_weight, custom_embeddings, 1);
         }

View file

@@ -182,9 +182,11 @@ protected:
     int64_t dim_in;
     int64_t dim_out;

-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
+        enum ggml_type wtype      = (tensor_types.find(prefix + "proj.weight") != tensor_types.end()) ? tensor_types[prefix + "proj.weight"] : GGML_TYPE_F32;
+        enum ggml_type bias_wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "proj.bias") != tensor_types.end()) ? tensor_types[prefix + "proj.bias"] : GGML_TYPE_F32;
         params["proj.weight"] = ggml_new_tensor_2d(ctx, wtype, dim_in, dim_out * 2);
-        params["proj.bias"]   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, dim_out * 2);
+        params["proj.bias"]   = ggml_new_tensor_1d(ctx, bias_wtype, dim_out * 2);
     }

 public:

@@ -245,16 +247,19 @@ protected:
     int64_t context_dim;
     int64_t n_head;
     int64_t d_head;
+    bool flash_attn;

 public:
     CrossAttention(int64_t query_dim,
                    int64_t context_dim,
                    int64_t n_head,
-                   int64_t d_head)
+                   int64_t d_head,
+                   bool flash_attn = false)
         : n_head(n_head),
           d_head(d_head),
           query_dim(query_dim),
-          context_dim(context_dim) {
+          context_dim(context_dim),
+          flash_attn(flash_attn) {
         int64_t inner_dim = d_head * n_head;

         blocks["to_q"] = std::shared_ptr<GGMLBlock>(new Linear(query_dim, inner_dim, false));

@@ -283,7 +288,7 @@ public:
         auto k = to_k->forward(ctx, context);  // [N, n_context, inner_dim]
         auto v = to_v->forward(ctx, context);  // [N, n_context, inner_dim]

-        x = ggml_nn_attention_ext(ctx, q, k, v, n_head, NULL, false);  // [N, n_token, inner_dim]
+        x = ggml_nn_attention_ext(ctx, q, k, v, n_head, NULL, false, false, flash_attn);  // [N, n_token, inner_dim]

         x = to_out_0->forward(ctx, x);  // [N, n_token, query_dim]
         return x;

@@ -301,15 +306,16 @@ public:
                           int64_t n_head,
                           int64_t d_head,
                           int64_t context_dim,
-                          bool ff_in = false)
+                          bool ff_in      = false,
+                          bool flash_attn = false)
         : n_head(n_head), d_head(d_head), ff_in(ff_in) {
         // disable_self_attn is always False
         // disable_temporal_crossattention is always False
         // switch_temporal_ca_to_sa is always False
         // inner_dim is always None or equal to dim
         // gated_ff is always True
-        blocks["attn1"] = std::shared_ptr<GGMLBlock>(new CrossAttention(dim, dim, n_head, d_head));
-        blocks["attn2"] = std::shared_ptr<GGMLBlock>(new CrossAttention(dim, context_dim, n_head, d_head));
+        blocks["attn1"] = std::shared_ptr<GGMLBlock>(new CrossAttention(dim, dim, n_head, d_head, flash_attn));
+        blocks["attn2"] = std::shared_ptr<GGMLBlock>(new CrossAttention(dim, context_dim, n_head, d_head, flash_attn));
         blocks["ff"]    = std::shared_ptr<GGMLBlock>(new FeedForward(dim, dim));
         blocks["norm1"] = std::shared_ptr<GGMLBlock>(new LayerNorm(dim));
         blocks["norm2"] = std::shared_ptr<GGMLBlock>(new LayerNorm(dim));

@@ -374,7 +380,8 @@ public:
                        int64_t n_head,
                        int64_t d_head,
                        int64_t depth,
-                       int64_t context_dim)
+                       int64_t context_dim,
+                       bool flash_attn = false)
         : in_channels(in_channels),
           n_head(n_head),
           d_head(d_head),

@@ -388,7 +395,7 @@ public:
         for (int i = 0; i < depth; i++) {
             std::string name = "transformer_blocks." + std::to_string(i);
-            blocks[name]     = std::shared_ptr<GGMLBlock>(new BasicTransformerBlock(inner_dim, n_head, d_head, context_dim));
+            blocks[name]     = std::shared_ptr<GGMLBlock>(new BasicTransformerBlock(inner_dim, n_head, d_head, context_dim, false, flash_attn));
         }

         blocks["proj_out"] = std::shared_ptr<GGMLBlock>(new Conv2d(inner_dim, in_channels, {1, 1}));

@@ -433,8 +440,10 @@ public:
 class AlphaBlender : public GGMLBlock {
 protected:
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["mix_factor"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
+        // Get the type of the "mix_factor" tensor from the input tensors map with the specified prefix
+        enum ggml_type wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "mix_factor") != tensor_types.end()) ? tensor_types[prefix + "mix_factor"] : GGML_TYPE_F32;
+        params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
     }

     float get_alpha() {

View file

@@ -44,8 +44,8 @@ struct Conditioner {
 // Ref: https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/cad87bf4e3e0b0a759afa94e933527c3123d59bc/modules/sd_hijack_clip.py#L283
 struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
     SDVersion version = VERSION_SD1;
+    PMVersion pm_version = PM_VERSION_1;
     CLIPTokenizer tokenizer;
-    ggml_type wtype;
     std::shared_ptr<CLIPTextModelRunner> text_model;
     std::shared_ptr<CLIPTextModelRunner> text_model2;

@@ -56,11 +56,12 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
     std::vector<std::string> readed_embeddings;

     FrozenCLIPEmbedderWithCustomWords(ggml_backend_t backend,
-                                      ggml_type wtype,
+                                      std::map<std::string, enum ggml_type>& tensor_types,
                                       const std::string& embd_dir,
                                       SDVersion version = VERSION_SD1,
+                                      PMVersion pv      = PM_VERSION_1,
                                       int clip_skip     = -1)
-        : version(version), tokenizer(version == VERSION_SD2 ? 0 : 49407), embd_dir(embd_dir), wtype(wtype) {
+        : version(version), pm_version(pv), tokenizer(version == VERSION_SD2 ? 0 : 49407), embd_dir(embd_dir) {
         if (clip_skip <= 0) {
             clip_skip = 1;
             if (version == VERSION_SD2 || version == VERSION_SDXL) {

@@ -68,12 +69,12 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
             }
         }
         if (version == VERSION_SD1) {
-            text_model = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip);
+            text_model = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip);
         } else if (version == VERSION_SD2) {
-            text_model = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPEN_CLIP_VIT_H_14, clip_skip);
+            text_model = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "cond_stage_model.transformer.text_model", OPEN_CLIP_VIT_H_14, clip_skip);
         } else if (version == VERSION_SDXL) {
-            text_model  = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip, false);
-            text_model2 = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPEN_CLIP_VIT_BIGG_14, clip_skip, false);
+            text_model  = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip, false);
+            text_model2 = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "cond_stage_model.1.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, clip_skip, false);
         }
     }
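A hypothetical call site under the new constructor signature; the per-tensor type map (assumed to be produced by the model loader, which this diff does not show) replaces the old single `wtype`:

```cpp
#include <map>
#include <memory>
#include <string>

// Sketch only: how a caller might construct the embedder under the new API.
// Where tensor_types is filled in (the model loader) is outside this diff.
void make_cond_model(ggml_backend_t backend,
                     std::map<std::string, enum ggml_type>& tensor_types) {
    auto cond = std::make_shared<FrozenCLIPEmbedderWithCustomWords>(
        backend, tensor_types,
        "",            // embd_dir: no custom embeddings
        VERSION_SD1,   // model family
        PM_VERSION_1,  // PhotoMaker version (new parameter in this commit)
        -1);           // clip_skip: resolve from defaults
}
```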
@@ -136,14 +137,14 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
                 LOG_DEBUG("embedding wrong hidden size, got %i, expected %i", tensor_storage.ne[0], hidden_size);
                 return false;
             }
-            embd        = ggml_new_tensor_2d(embd_ctx, wtype, hidden_size, tensor_storage.n_dims > 1 ? tensor_storage.ne[1] : 1);
+            embd        = ggml_new_tensor_2d(embd_ctx, tensor_storage.type, hidden_size, tensor_storage.n_dims > 1 ? tensor_storage.ne[1] : 1);
             *dst_tensor = embd;
             return true;
         };
         model_loader.load_tensors(on_load, NULL);
         readed_embeddings.push_back(embd_name);
         token_embed_custom.resize(token_embed_custom.size() + ggml_nbytes(embd));
-        memcpy((void*)(token_embed_custom.data() + num_custom_embeddings * hidden_size * ggml_type_size(wtype)),
+        memcpy((void*)(token_embed_custom.data() + num_custom_embeddings * hidden_size * ggml_type_size(embd->type)),
                embd->data,
                ggml_nbytes(embd));
         for (int i = 0; i < embd->ne[1]; i++) {

@@ -268,7 +269,7 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
                     std::vector<int> clean_input_ids_tmp;
                     for (uint32_t i = 0; i < class_token_index[0]; i++)
                         clean_input_ids_tmp.push_back(clean_input_ids[i]);
-                    for (uint32_t i = 0; i < num_input_imgs; i++)
+                    for (uint32_t i = 0; i < (pm_version == PM_VERSION_2 ? 2 * num_input_imgs : num_input_imgs); i++)
                         clean_input_ids_tmp.push_back(class_token);
                     for (uint32_t i = class_token_index[0] + 1; i < clean_input_ids.size(); i++)
                         clean_input_ids_tmp.push_back(clean_input_ids[i]);

@@ -279,13 +280,16 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
             tokens.insert(tokens.end(), clean_input_ids.begin(), clean_input_ids.end());
             weights.insert(weights.end(), clean_input_ids.size(), curr_weight);
         }
-        tokens.insert(tokens.begin(), tokenizer.BOS_TOKEN_ID);
-        weights.insert(weights.begin(), 1.0);
+        // BUG!! double counting, pad_tokens will add BOS at the beginning
+        // tokens.insert(tokens.begin(), tokenizer.BOS_TOKEN_ID);
+        // weights.insert(weights.begin(), 1.0);

         tokenizer.pad_tokens(tokens, weights, max_length, padding);
+        int offset = pm_version == PM_VERSION_2 ? 2 * num_input_imgs : num_input_imgs;
         for (uint32_t i = 0; i < tokens.size(); i++) {
-            if (class_idx + 1 <= i && i < class_idx + 1 + num_input_imgs)
+            // if (class_idx + 1 <= i && i < class_idx + 1 + 2*num_input_imgs) // photomaker V2 has num_tokens(=2)*num_input_imgs
+            if (class_idx + 1 <= i && i < class_idx + 1 + offset)  // photomaker V2 has num_tokens(=2)*num_input_imgs
+                // hardcode for now
                 class_token_mask.push_back(true);
             else
                 class_token_mask.push_back(false);
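Restating the PhotoMaker bookkeeping above: v2 inserts two trigger tokens per input image where v1 inserts one, so the mask must cover `offset` positions after the class token. A hedged sketch of the same logic as a standalone helper (the function is mine, not the patch's):

```cpp
#include <algorithm>
#include <vector>

// Mark the positions occupied by the repeated class tokens in a padded
// sequence; offset is num_input_imgs for PhotoMaker v1, 2 * num_input_imgs for v2.
static std::vector<bool> build_class_token_mask(size_t n_tokens, size_t class_idx, size_t offset) {
    std::vector<bool> mask(n_tokens, false);
    for (size_t i = class_idx + 1; i < std::min(n_tokens, class_idx + 1 + offset); i++) {
        mask[i] = true;
    }
    return mask;
}
```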
@@ -450,7 +454,7 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
         }
         int64_t t1 = ggml_time_ms();
-        LOG_DEBUG("computing condition graph completed, taking %d ms", t1 - t0);
+        LOG_DEBUG("computing condition graph completed, taking %" PRId64 " ms", t1 - t0);
         ggml_tensor* result = ggml_dup_tensor(work_ctx, chunk_hidden_states);
         {
             float original_mean = ggml_tensor_mean(chunk_hidden_states);
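The `%d` → `%" PRId64 "` fixes in this file are the portable way to print the `int64_t` that `ggml_time_ms()` returns; handing an `int64_t` to `%d` is undefined behavior on platforms where `int` is 32-bit. A minimal illustration:

```cpp
#include <cinttypes>
#include <cstdio>

int main() {
    int64_t elapsed_ms = 1234;  // e.g. t1 - t0 from ggml_time_ms()
    // PRId64 expands to the correct conversion specifier for int64_t everywhere.
    std::printf("computing condition graph completed, taking %" PRId64 " ms\n", elapsed_ms);
}
```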
@@ -585,9 +589,9 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
 struct FrozenCLIPVisionEmbedder : public GGMLRunner {
     CLIPVisionModelProjection vision_model;

-    FrozenCLIPVisionEmbedder(ggml_backend_t backend, ggml_type wtype)
-        : vision_model(OPEN_CLIP_VIT_H_14, true), GGMLRunner(backend, wtype) {
-        vision_model.init(params_ctx, wtype);
+    FrozenCLIPVisionEmbedder(ggml_backend_t backend, std::map<std::string, enum ggml_type>& tensor_types)
+        : vision_model(OPEN_CLIP_VIT_H_14, true), GGMLRunner(backend) {
+        vision_model.init(params_ctx, tensor_types, "cond_stage_model.transformer");
     }

     std::string get_desc() {

@@ -622,7 +626,6 @@ struct FrozenCLIPVisionEmbedder : public GGMLRunner {
 };

 struct SD3CLIPEmbedder : public Conditioner {
-    ggml_type wtype;
     CLIPTokenizer clip_l_tokenizer;
     CLIPTokenizer clip_g_tokenizer;
     T5UniGramTokenizer t5_tokenizer;

@@ -631,15 +634,15 @@ struct SD3CLIPEmbedder : public Conditioner {
     std::shared_ptr<T5Runner> t5;

     SD3CLIPEmbedder(ggml_backend_t backend,
-                    ggml_type wtype,
+                    std::map<std::string, enum ggml_type>& tensor_types,
                     int clip_skip = -1)
-        : wtype(wtype), clip_g_tokenizer(0) {
+        : clip_g_tokenizer(0) {
         if (clip_skip <= 0) {
             clip_skip = 2;
         }
-        clip_l = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip, false);
-        clip_g = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPEN_CLIP_VIT_BIGG_14, clip_skip, false);
-        t5     = std::make_shared<T5Runner>(backend, wtype);
+        clip_l = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip, false);
+        clip_g = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "text_encoders.clip_g.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, clip_skip, false);
+        t5     = std::make_shared<T5Runner>(backend, tensor_types, "text_encoders.t5xxl.transformer");
     }

     void set_clip_skip(int clip_skip) {

@@ -798,21 +801,16 @@ struct SD3CLIPEmbedder : public Conditioner {
             }

             if (chunk_idx == 0) {
-                // auto it = std::find(chunk_tokens.begin(), chunk_tokens.end(), clip_l_tokenizer.EOS_TOKEN_ID);
-                // max_token_idx = std::min<size_t>(std::distance(chunk_tokens.begin(), it), chunk_tokens.size() - 1);
-                // clip_l->compute(n_threads,
-                //                 input_ids,
-                //                 0,
-                //                 NULL,
-                //                 max_token_idx,
-                //                 true,
-                //                 &pooled_l,
-                //                 work_ctx);
-
-                // clip_l.transformer.text_model.text_projection not in file, ignore
-                // TODO: use torch.eye(embed_dim) as default clip_l.transformer.text_model.text_projection
-                pooled_l = ggml_new_tensor_1d(work_ctx, GGML_TYPE_F32, 768);
-                ggml_set_f32(pooled_l, 0.f);
+                auto it       = std::find(chunk_tokens.begin(), chunk_tokens.end(), clip_l_tokenizer.EOS_TOKEN_ID);
+                max_token_idx = std::min<size_t>(std::distance(chunk_tokens.begin(), it), chunk_tokens.size() - 1);
+                clip_l->compute(n_threads,
+                                input_ids,
+                                0,
+                                NULL,
+                                max_token_idx,
+                                true,
+                                &pooled_l,
+                                work_ctx);
             }
         }

@@ -852,21 +850,16 @@ struct SD3CLIPEmbedder : public Conditioner {
             }

             if (chunk_idx == 0) {
-                // auto it = std::find(chunk_tokens.begin(), chunk_tokens.end(), clip_g_tokenizer.EOS_TOKEN_ID);
-                // max_token_idx = std::min<size_t>(std::distance(chunk_tokens.begin(), it), chunk_tokens.size() - 1);
-                // clip_g->compute(n_threads,
-                //                 input_ids,
-                //                 0,
-                //                 NULL,
-                //                 max_token_idx,
-                //                 true,
-                //                 &pooled_g,
-                //                 work_ctx);
-
-                // clip_l.transformer.text_model.text_projection not in file, ignore pooled_g too
-                // TODO: fix pooled_g
-                pooled_g = ggml_new_tensor_1d(work_ctx, GGML_TYPE_F32, 1280);
-                ggml_set_f32(pooled_g, 0.f);
+                auto it       = std::find(chunk_tokens.begin(), chunk_tokens.end(), clip_g_tokenizer.EOS_TOKEN_ID);
+                max_token_idx = std::min<size_t>(std::distance(chunk_tokens.begin(), it), chunk_tokens.size() - 1);
+                clip_g->compute(n_threads,
+                                input_ids,
+                                0,
+                                NULL,
+                                max_token_idx,
+                                true,
+                                &pooled_g,
+                                work_ctx);
             }
         }

@@ -927,7 +920,7 @@ struct SD3CLIPEmbedder : public Conditioner {
         }
         int64_t t1 = ggml_time_ms();
-        LOG_DEBUG("computing condition graph completed, taking %d ms", t1 - t0);
+        LOG_DEBUG("computing condition graph completed, taking %" PRId64 " ms", t1 - t0);
         if (force_zero_embeddings) {
             float* vec = (float*)chunk_hidden_states->data;
             for (int i = 0; i < ggml_nelements(chunk_hidden_states); i++) {

@@ -979,21 +972,19 @@ struct SD3CLIPEmbedder : public Conditioner {
 };

 struct FluxCLIPEmbedder : public Conditioner {
-    ggml_type wtype;
     CLIPTokenizer clip_l_tokenizer;
     T5UniGramTokenizer t5_tokenizer;

     std::shared_ptr<CLIPTextModelRunner> clip_l;
     std::shared_ptr<T5Runner> t5;

     FluxCLIPEmbedder(ggml_backend_t backend,
-                     ggml_type wtype,
-                     int clip_skip = -1)
-        : wtype(wtype) {
+                     std::map<std::string, enum ggml_type>& tensor_types,
+                     int clip_skip = -1) {
         if (clip_skip <= 0) {
             clip_skip = 2;
         }
-        clip_l = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip, true);
-        t5     = std::make_shared<T5Runner>(backend, wtype);
+        clip_l = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip, true);
+        t5     = std::make_shared<T5Runner>(backend, tensor_types, "text_encoders.t5xxl.transformer");
     }

     void set_clip_skip(int clip_skip) {

@@ -1104,21 +1095,17 @@ struct FluxCLIPEmbedder : public Conditioner {
             auto input_ids       = vector_to_ggml_tensor_i32(work_ctx, chunk_tokens);
             size_t max_token_idx = 0;

-            // auto it = std::find(chunk_tokens.begin(), chunk_tokens.end(), clip_l_tokenizer.EOS_TOKEN_ID);
-            // max_token_idx = std::min<size_t>(std::distance(chunk_tokens.begin(), it), chunk_tokens.size() - 1);
-            // clip_l->compute(n_threads,
-            //                 input_ids,
-            //                 0,
-            //                 NULL,
-            //                 max_token_idx,
-            //                 true,
-            //                 &pooled,
-            //                 work_ctx);
-
-            // clip_l.transformer.text_model.text_projection not in file, ignore
-            // TODO: use torch.eye(embed_dim) as default clip_l.transformer.text_model.text_projection
-            pooled = ggml_new_tensor_1d(work_ctx, GGML_TYPE_F32, 768);
-            ggml_set_f32(pooled, 0.f);
+            auto it       = std::find(chunk_tokens.begin(), chunk_tokens.end(), clip_l_tokenizer.EOS_TOKEN_ID);
+            max_token_idx = std::min<size_t>(std::distance(chunk_tokens.begin(), it), chunk_tokens.size() - 1);
+            clip_l->compute(n_threads,
+                            input_ids,
+                            0,
+                            NULL,
+                            max_token_idx,
+                            true,
+                            &pooled,
+                            work_ctx);
         }

         // t5

@@ -1152,7 +1139,7 @@ struct FluxCLIPEmbedder : public Conditioner {
         }
         int64_t t1 = ggml_time_ms();
-        LOG_DEBUG("computing condition graph completed, taking %d ms", t1 - t0);
+        LOG_DEBUG("computing condition graph completed, taking %" PRId64 " ms", t1 - t0);
         if (force_zero_embeddings) {
             float* vec = (float*)chunk_hidden_states->data;
             for (int i = 0; i < ggml_nelements(chunk_hidden_states); i++) {

View file

@@ -317,10 +317,10 @@ struct ControlNet : public GGMLRunner {
     bool guided_hint_cached = false;

     ControlNet(ggml_backend_t backend,
-               ggml_type wtype,
+               std::map<std::string, enum ggml_type>& tensor_types,
               SDVersion version = VERSION_SD1)
-        : GGMLRunner(backend, wtype), control_net(version) {
-        control_net.init(params_ctx, wtype);
+        : GGMLRunner(backend), control_net(version) {
+        control_net.init(params_ctx, tensor_types, "");
     }

     ~ControlNet() {

View file

@@ -17,7 +17,8 @@ struct DiffusionModel {
                          std::vector<struct ggml_tensor*> controls = {},
                          float control_strength                    = 0.f,
                          struct ggml_tensor** output               = NULL,
-                         struct ggml_context* output_ctx           = NULL) = 0;
+                         struct ggml_context* output_ctx           = NULL,
+                         std::vector<int> skip_layers              = std::vector<int>()) = 0;
     virtual void alloc_params_buffer() = 0;
     virtual void free_params_buffer()  = 0;
     virtual void free_compute_buffer() = 0;

@@ -30,9 +31,10 @@ struct UNetModel : public DiffusionModel {
     UNetModelRunner unet;

     UNetModel(ggml_backend_t backend,
-              ggml_type wtype,
-              SDVersion version = VERSION_SD1)
-        : unet(backend, wtype, version) {
+              std::map<std::string, enum ggml_type>& tensor_types,
+              SDVersion version = VERSION_SD1,
+              bool flash_attn   = false)
+        : unet(backend, tensor_types, "model.diffusion_model", version, flash_attn) {
     }

     void alloc_params_buffer() {

@@ -70,7 +72,9 @@ struct UNetModel : public DiffusionModel {
                  std::vector<struct ggml_tensor*> controls = {},
                  float control_strength                    = 0.f,
                  struct ggml_tensor** output               = NULL,
-                 struct ggml_context* output_ctx           = NULL) {
+                 struct ggml_context* output_ctx           = NULL,
+                 std::vector<int> skip_layers              = std::vector<int>()) {
+        (void)skip_layers;  // SLG doesn't work with UNet models
         return unet.compute(n_threads, x, timesteps, context, c_concat, y, num_video_frames, controls, control_strength, output, output_ctx);
     }
 };

@@ -79,9 +83,8 @@ struct MMDiTModel : public DiffusionModel {
     MMDiTRunner mmdit;

     MMDiTModel(ggml_backend_t backend,
-               ggml_type wtype,
-               SDVersion version = VERSION_SD3_2B)
-        : mmdit(backend, wtype, version) {
+               std::map<std::string, enum ggml_type>& tensor_types)
+        : mmdit(backend, tensor_types, "model.diffusion_model") {
     }

     void alloc_params_buffer() {

@@ -119,8 +122,9 @@ struct MMDiTModel : public DiffusionModel {
                  std::vector<struct ggml_tensor*> controls = {},
                  float control_strength                    = 0.f,
                  struct ggml_tensor** output               = NULL,
-                 struct ggml_context* output_ctx           = NULL) {
-        return mmdit.compute(n_threads, x, timesteps, context, y, output, output_ctx);
+                 struct ggml_context* output_ctx           = NULL,
+                 std::vector<int> skip_layers              = std::vector<int>()) {
+        return mmdit.compute(n_threads, x, timesteps, context, y, output, output_ctx, skip_layers);
     }
 };

@@ -128,9 +132,9 @@ struct FluxModel : public DiffusionModel {
     Flux::FluxRunner flux;

     FluxModel(ggml_backend_t backend,
-              ggml_type wtype,
-              SDVersion version = VERSION_FLUX_DEV)
-        : flux(backend, wtype, version) {
+              std::map<std::string, enum ggml_type>& tensor_types,
+              bool flash_attn = false)
+        : flux(backend, tensor_types, "model.diffusion_model", flash_attn) {
     }

     void alloc_params_buffer() {

@@ -168,8 +172,9 @@ struct FluxModel : public DiffusionModel {
                  std::vector<struct ggml_tensor*> controls = {},
                  float control_strength                    = 0.f,
                  struct ggml_tensor** output               = NULL,
-                 struct ggml_context* output_ctx           = NULL) {
-        return flux.compute(n_threads, x, timesteps, context, y, guidance, output, output_ctx);
+                 struct ggml_context* output_ctx           = NULL,
+                 std::vector<int> skip_layers              = std::vector<int>()) {
+        return flux.compute(n_threads, x, timesteps, context, y, guidance, output, output_ctx, skip_layers);
     }
 };

View file

@@ -142,10 +142,9 @@ struct ESRGAN : public GGMLRunner {
     int scale     = 4;
    int tile_size = 128;  // avoid cuda OOM for 4gb VRAM

-    ESRGAN(ggml_backend_t backend,
-           ggml_type wtype)
-        : GGMLRunner(backend, wtype) {
-        rrdb_net.init(params_ctx, wtype);
+    ESRGAN(ggml_backend_t backend, std::map<std::string, enum ggml_type>& tensor_types)
+        : GGMLRunner(backend) {
+        rrdb_net.init(params_ctx, tensor_types, "");
     }

     std::string get_desc() {

View file

@@ -35,8 +35,9 @@ namespace Flux {
         int64_t hidden_size;
         float eps;

-        void init_params(struct ggml_context* ctx, ggml_type wtype) {
-            params["scale"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hidden_size);
+        void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+            ggml_type wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "scale") != tensor_types.end()) ? tensor_types[prefix + "scale"] : GGML_TYPE_F32;
+            params["scale"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
         }

     public:

@@ -115,25 +116,28 @@ namespace Flux {
                                        struct ggml_tensor* q,
                                        struct ggml_tensor* k,
                                        struct ggml_tensor* v,
-                                       struct ggml_tensor* pe) {
+                                       struct ggml_tensor* pe,
+                                       bool flash_attn) {
         // q,k,v: [N, L, n_head, d_head]
         // pe: [L, d_head/2, 2, 2]
         // return: [N, L, n_head*d_head]
         q = apply_rope(ctx, q, pe);  // [N*n_head, L, d_head]
         k = apply_rope(ctx, k, pe);  // [N*n_head, L, d_head]

-        auto x = ggml_nn_attention_ext(ctx, q, k, v, v->ne[1], NULL, false, true);  // [N, L, n_head*d_head]
+        auto x = ggml_nn_attention_ext(ctx, q, k, v, v->ne[1], NULL, false, true, flash_attn);  // [N, L, n_head*d_head]
         return x;
     }

     struct SelfAttention : public GGMLBlock {
     public:
         int64_t num_heads;
+        bool flash_attn;

     public:
         SelfAttention(int64_t dim,
                       int64_t num_heads = 8,
-                      bool qkv_bias     = false)
+                      bool qkv_bias     = false,
+                      bool flash_attn   = false)
             : num_heads(num_heads) {
             int64_t head_dim = dim / num_heads;
             blocks["qkv"]    = std::shared_ptr<GGMLBlock>(new Linear(dim, dim * 3, qkv_bias));

@@ -168,7 +172,7 @@ namespace Flux {
             // pe: [n_token, d_head/2, 2, 2]
             // return [N, n_token, dim]
             auto qkv = pre_attention(ctx, x);  // q,k,v: [N, n_token, n_head, d_head]
-            x        = attention(ctx, qkv[0], qkv[1], qkv[2], pe);  // [N, n_token, dim]
+            x        = attention(ctx, qkv[0], qkv[1], qkv[2], pe, flash_attn);  // [N, n_token, dim]
             x        = post_attention(ctx, x);  // [N, n_token, dim]
             return x;
         }
@@ -237,15 +241,19 @@ namespace Flux {
         }
     }

     struct DoubleStreamBlock : public GGMLBlock {
+        bool flash_attn;

     public:
         DoubleStreamBlock(int64_t hidden_size,
                           int64_t num_heads,
                           float mlp_ratio,
-                          bool qkv_bias = false) {
+                          bool qkv_bias   = false,
+                          bool flash_attn = false)
+            : flash_attn(flash_attn) {
             int64_t mlp_hidden_dim = hidden_size * mlp_ratio;
             blocks["img_mod"]      = std::shared_ptr<GGMLBlock>(new Modulation(hidden_size, true));
             blocks["img_norm1"]    = std::shared_ptr<GGMLBlock>(new LayerNorm(hidden_size, 1e-6f, false));
-            blocks["img_attn"]     = std::shared_ptr<GGMLBlock>(new SelfAttention(hidden_size, num_heads, qkv_bias));
+            blocks["img_attn"]     = std::shared_ptr<GGMLBlock>(new SelfAttention(hidden_size, num_heads, qkv_bias, flash_attn));

             blocks["img_norm2"] = std::shared_ptr<GGMLBlock>(new LayerNorm(hidden_size, 1e-6f, false));
             blocks["img_mlp.0"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, mlp_hidden_dim));

@@ -254,7 +262,7 @@ namespace Flux {
             blocks["txt_mod"]   = std::shared_ptr<GGMLBlock>(new Modulation(hidden_size, true));
             blocks["txt_norm1"] = std::shared_ptr<GGMLBlock>(new LayerNorm(hidden_size, 1e-6f, false));
-            blocks["txt_attn"]  = std::shared_ptr<GGMLBlock>(new SelfAttention(hidden_size, num_heads, qkv_bias));
+            blocks["txt_attn"]  = std::shared_ptr<GGMLBlock>(new SelfAttention(hidden_size, num_heads, qkv_bias, flash_attn));

             blocks["txt_norm2"] = std::shared_ptr<GGMLBlock>(new LayerNorm(hidden_size, 1e-6f, false));
             blocks["txt_mlp.0"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, mlp_hidden_dim));

@@ -316,7 +324,7 @@ namespace Flux {
             auto k = ggml_concat(ctx, txt_k, img_k, 2);  // [N, n_txt_token + n_img_token, n_head, d_head]
             auto v = ggml_concat(ctx, txt_v, img_v, 2);  // [N, n_txt_token + n_img_token, n_head, d_head]

-            auto attn = attention(ctx, q, k, v, pe);  // [N, n_txt_token + n_img_token, n_head*d_head]
+            auto attn = attention(ctx, q, k, v, pe, flash_attn);  // [N, n_txt_token + n_img_token, n_head*d_head]
             attn      = ggml_cont(ctx, ggml_permute(ctx, attn, 0, 2, 1, 3));  // [n_txt_token + n_img_token, N, hidden_size]
             auto txt_attn_out = ggml_view_3d(ctx,
                                              attn,

@@ -364,13 +372,15 @@ namespace Flux {
         int64_t num_heads;
         int64_t hidden_size;
         int64_t mlp_hidden_dim;
+        bool flash_attn;

     public:
         SingleStreamBlock(int64_t hidden_size,
                           int64_t num_heads,
                           float mlp_ratio = 4.0f,
-                          float qk_scale  = 0.f)
-            : hidden_size(hidden_size), num_heads(num_heads) {
+                          float qk_scale  = 0.f,
+                          bool flash_attn = false)
+            : hidden_size(hidden_size), num_heads(num_heads), flash_attn(flash_attn) {
             int64_t head_dim = hidden_size / num_heads;
             float scale      = qk_scale;
             if (scale <= 0.f) {

@@ -433,7 +443,7 @@ namespace Flux {
             auto v = ggml_reshape_4d(ctx, qkv_vec[2], head_dim, num_heads, qkv_vec[2]->ne[1], qkv_vec[2]->ne[2]);  // [N, n_token, n_head, d_head]
             q      = norm->query_norm(ctx, q);
             k      = norm->key_norm(ctx, k);
-            auto attn = attention(ctx, q, k, v, pe);  // [N, n_token, hidden_size]
+            auto attn = attention(ctx, q, k, v, pe, flash_attn);  // [N, n_token, hidden_size]

             auto attn_mlp = ggml_concat(ctx, attn, ggml_gelu_inplace(ctx, mlp), 0);  // [N, n_token, hidden_size + mlp_hidden_dim]
             auto output   = linear2->forward(ctx, attn_mlp);  // [N, n_token, hidden_size]

@@ -492,6 +502,7 @@ namespace Flux {
         int theta           = 10000;
         bool qkv_bias       = true;
         bool guidance_embed = true;
+        bool flash_attn     = true;
     };

     struct Flux : public GGMLBlock {

@@ -646,13 +657,16 @@ namespace Flux {
             blocks["double_blocks." + std::to_string(i)] = std::shared_ptr<GGMLBlock>(new DoubleStreamBlock(params.hidden_size,
                                                                                                             params.num_heads,
                                                                                                             params.mlp_ratio,
-                                                                                                            params.qkv_bias));
+                                                                                                            params.qkv_bias,
+                                                                                                            params.flash_attn));
         }

         for (int i = 0; i < params.depth_single_blocks; i++) {
             blocks["single_blocks." + std::to_string(i)] = std::shared_ptr<GGMLBlock>(new SingleStreamBlock(params.hidden_size,
                                                                                                             params.num_heads,
-                                                                                                            params.mlp_ratio));
+                                                                                                            params.mlp_ratio,
+                                                                                                            0.f,
+                                                                                                            params.flash_attn));
         }

         blocks["final_layer"] = std::shared_ptr<GGMLBlock>(new LastLayer(params.hidden_size, 1, out_channels));

@@ -711,7 +725,8 @@ namespace Flux {
                                         struct ggml_tensor* timesteps,
                                         struct ggml_tensor* y,
                                         struct ggml_tensor* guidance,
-                                        struct ggml_tensor* pe) {
+                                        struct ggml_tensor* pe,
+                                        std::vector<int> skip_layers = std::vector<int>()) {
             auto img_in    = std::dynamic_pointer_cast<Linear>(blocks["img_in"]);
             auto time_in   = std::dynamic_pointer_cast<MLPEmbedder>(blocks["time_in"]);
             auto vector_in = std::dynamic_pointer_cast<MLPEmbedder>(blocks["vector_in"]);

@@ -733,6 +748,10 @@ namespace Flux {
             txt = txt_in->forward(ctx, txt);

             for (int i = 0; i < params.depth; i++) {
+                if (skip_layers.size() > 0 && std::find(skip_layers.begin(), skip_layers.end(), i) != skip_layers.end()) {
+                    continue;
+                }
+
                 auto block = std::dynamic_pointer_cast<DoubleStreamBlock>(blocks["double_blocks." + std::to_string(i)]);

                 auto img_txt = block->forward(ctx, img, txt, vec, pe);

@@ -742,6 +761,9 @@ namespace Flux {
             auto txt_img = ggml_concat(ctx, txt, img, 1);  // [N, n_txt_token + n_img_token, hidden_size]
             for (int i = 0; i < params.depth_single_blocks; i++) {
+                if (skip_layers.size() > 0 && std::find(skip_layers.begin(), skip_layers.end(), i + params.depth) != skip_layers.end()) {
+                    continue;
+                }
+
                 auto block = std::dynamic_pointer_cast<SingleStreamBlock>(blocks["single_blocks." + std::to_string(i)]);

                 txt_img = block->forward(ctx, txt_img, vec, pe);

@@ -769,7 +791,8 @@ namespace Flux {
                                    struct ggml_tensor* context,
                                    struct ggml_tensor* y,
                                    struct ggml_tensor* guidance,
-                                   struct ggml_tensor* pe) {
+                                   struct ggml_tensor* pe,
+                                   std::vector<int> skip_layers = std::vector<int>()) {
             // Forward pass of DiT.
             // x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
             // timestep: (N,) tensor of diffusion timesteps

@@ -791,7 +814,7 @@ namespace Flux {
             // img = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=patch_size, pw=patch_size)
             auto img = patchify(ctx, x, patch_size);  // [N, h*w, C * patch_size * patch_size]

-            auto out = forward_orig(ctx, img, context, timestep, y, guidance, pe);  // [N, h*w, C * patch_size * patch_size]
+            auto out = forward_orig(ctx, img, context, timestep, y, guidance, pe, skip_layers);  // [N, h*w, C * patch_size * patch_size]

             // rearrange(out, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=2, pw=2)
             out = unpatchify(ctx, out, (H + pad_h) / patch_size, (W + pad_w) / patch_size, patch_size);  // [N, C, H + pad_h, W + pad_w]
@@ -801,20 +824,55 @@ namespace Flux {
     };

     struct FluxRunner : public GGMLRunner {
+        static std::map<std::string, enum ggml_type> empty_tensor_types;

     public:
         FluxParams flux_params;
         Flux flux;
         std::vector<float> pe_vec;  // for cache

         FluxRunner(ggml_backend_t backend,
-                   ggml_type wtype,
-                   SDVersion version = VERSION_FLUX_DEV)
-            : GGMLRunner(backend, wtype) {
-            if (version == VERSION_FLUX_SCHNELL) {
+                   std::map<std::string, enum ggml_type>& tensor_types = empty_tensor_types,
+                   const std::string prefix                            = "",
+                   bool flash_attn                                     = false)
+            : GGMLRunner(backend) {
+            flux_params.flash_attn          = flash_attn;
             flux_params.guidance_embed      = false;
+            flux_params.depth               = 0;
+            flux_params.depth_single_blocks = 0;
+            for (auto pair : tensor_types) {
+                std::string tensor_name = pair.first;
+                if (tensor_name.find("model.diffusion_model.") == std::string::npos)
+                    continue;
+                if (tensor_name.find("guidance_in.in_layer.weight") != std::string::npos) {
+                    // not schnell
+                    flux_params.guidance_embed = true;
                 }
+                size_t db = tensor_name.find("double_blocks.");
+                if (db != std::string::npos) {
+                    tensor_name     = tensor_name.substr(db);  // remove prefix
+                    int block_depth = atoi(tensor_name.substr(14, tensor_name.find(".", 14)).c_str());
+                    if (block_depth + 1 > flux_params.depth) {
+                        flux_params.depth = block_depth + 1;
+                    }
+                }
+                size_t sb = tensor_name.find("single_blocks.");
+                if (sb != std::string::npos) {
+                    tensor_name     = tensor_name.substr(sb);  // remove prefix
+                    int block_depth = atoi(tensor_name.substr(14, tensor_name.find(".", 14)).c_str());
+                    if (block_depth + 1 > flux_params.depth_single_blocks) {
+                        flux_params.depth_single_blocks = block_depth + 1;
+                    }
+                }
+            }
+            LOG_INFO("Flux blocks: %d double, %d single", flux_params.depth, flux_params.depth_single_blocks);
+            if (!flux_params.guidance_embed) {
+                LOG_INFO("Flux guidance is disabled (Schnell mode)");
+            }
             flux = Flux(flux_params);
-            flux.init(params_ctx, wtype);
+            flux.init(params_ctx, tensor_types, prefix);
         }

         std::string get_desc() {
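The constructor now derives the Flux architecture from the checkpoint itself rather than an `SDVersion` flag, by scanning tensor names for the highest block index. The core of that scan, isolated as a sketch (the tensor name below is illustrative):

```cpp
#include <cstdlib>
#include <string>

// Extract the block index from names like "double_blocks.18.img_attn.qkv.weight".
// Returns -1 if the marker is absent; the model depth is max(index) + 1.
static int block_index(const std::string& tensor_name, const std::string& marker) {
    size_t pos = tensor_name.find(marker);
    if (pos == std::string::npos) {
        return -1;
    }
    return atoi(tensor_name.c_str() + pos + marker.size());  // digits follow the marker
}
// e.g. block_index("model.diffusion_model.double_blocks.18.img_attn.qkv.weight",
//                  "double_blocks.") == 18
```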
@@ -829,7 +887,8 @@ namespace Flux {
                                     struct ggml_tensor* timesteps,
                                     struct ggml_tensor* context,
                                     struct ggml_tensor* y,
-                                    struct ggml_tensor* guidance) {
+                                    struct ggml_tensor* guidance,
+                                    std::vector<int> skip_layers = std::vector<int>()) {
         GGML_ASSERT(x->ne[3] == 1);
         struct ggml_cgraph* gf = ggml_new_graph_custom(compute_ctx, FLUX_GRAPH_SIZE, false);

@@ -856,7 +915,8 @@ namespace Flux {
                                          context,
                                          y,
                                          guidance,
-                                         pe);
+                                         pe,
+                                         skip_layers);

         ggml_build_forward_expand(gf, out);

@@ -870,14 +930,15 @@ namespace Flux {
                      struct ggml_tensor* y,
                      struct ggml_tensor* guidance,
                      struct ggml_tensor** output     = NULL,
-                     struct ggml_context* output_ctx = NULL) {
+                     struct ggml_context* output_ctx = NULL,
+                     std::vector<int> skip_layers    = std::vector<int>()) {
         // x: [N, in_channels, h, w]
         // timesteps: [N, ]
         // context: [N, max_position, hidden_size]
         // y: [N, adm_in_channels] or [1, adm_in_channels]
         // guidance: [N, ]
         auto get_graph = [&]() -> struct ggml_cgraph* {
-            return build_graph(x, timesteps, context, y, guidance);
+            return build_graph(x, timesteps, context, y, guidance, skip_layers);
         };

         GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);

@@ -929,7 +990,7 @@ namespace Flux {
         // ggml_backend_t backend = ggml_backend_cuda_init(0);
         ggml_backend_t backend    = ggml_backend_cpu_init();
         ggml_type model_data_type = GGML_TYPE_Q8_0;
-        std::shared_ptr<FluxRunner> flux = std::shared_ptr<FluxRunner>(new FluxRunner(backend, model_data_type));
+        std::shared_ptr<FluxRunner> flux = std::shared_ptr<FluxRunner>(new FluxRunner(backend));
         {
             LOG_INFO("loading from '%s'", file_path.c_str());

View file

@@ -22,8 +22,10 @@
 #include "ggml-alloc.h"
 #include "ggml-backend.h"
+#include "ggml.h"
 #include "ggml-cpu.h"
-#include "ggml.h"
+
+#include "model.h"

 #ifdef SD_USE_CUBLAS
 #include "ggml-cuda.h"

@@ -101,17 +103,11 @@ __STATIC_INLINE__ ggml_fp16_t ggml_tensor_get_f16(const ggml_tensor* tensor, int
 // static struct ggml_tensor* get_tensor_from_graph(struct ggml_cgraph* gf, const char* name) {
 //     struct ggml_tensor* res = NULL;
-//     for (int i = 0; i < gf->n_nodes; i++) {
-//         // printf("%d, %s \n", i, gf->nodes[i]->name);
-//         if (strcmp(ggml_get_name(gf->nodes[i]), name) == 0) {
-//             res = gf->nodes[i];
-//             break;
-//         }
-//     }
-//     for (int i = 0; i < gf->n_leafs; i++) {
-//         // printf("%d, %s \n", i, gf->leafs[i]->name);
-//         if (strcmp(ggml_get_name(gf->leafs[i]), name) == 0) {
-//             res = gf->leafs[i];
+//     for (int i = 0; i < ggml_graph_n_nodes(gf); i++) {
+//         struct ggml_tensor* node = ggml_graph_node(gf, i);
+//         // printf("%d, %s \n", i, ggml_get_name(node));
+//         if (strcmp(ggml_get_name(node), name) == 0) {
+//             res = node;
 //             break;
 //         }
 //     }

@@ -680,14 +676,12 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_attention(struct ggml_context* ctx
     struct ggml_tensor* kqv = ggml_flash_attn(ctx, q, k, v, false);  // [N * n_head, n_token, d_head]
 #else
     float d_head = (float)q->ne[0];
-
     struct ggml_tensor* kq = ggml_mul_mat(ctx, k, q);  // [N * n_head, n_token, n_k]
     kq                     = ggml_scale_inplace(ctx, kq, 1.0f / sqrt(d_head));
     if (mask) {
         kq = ggml_diag_mask_inf_inplace(ctx, kq, 0);
     }
     kq = ggml_soft_max_inplace(ctx, kq);
-
     struct ggml_tensor* kqv = ggml_mul_mat(ctx, v, kq);  // [N * n_head, n_token, d_head]
 #endif
     return kqv;

@@ -704,7 +698,8 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_attention_ext(struct ggml_context*
                                                             int64_t n_head,
                                                             struct ggml_tensor* mask = NULL,
                                                             bool diag_mask_inf       = false,
-                                                            bool skip_reshape        = false) {
+                                                            bool skip_reshape        = false,
+                                                            bool flash_attn          = false) {
     int64_t L_q;
     int64_t L_k;
     int64_t C;
@ -735,13 +730,42 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_attention_ext(struct ggml_context*
float scale = (1.0f / sqrt((float)d_head)); float scale = (1.0f / sqrt((float)d_head));
bool use_flash_attn = false; // if (flash_attn) {
ggml_tensor* kqv = NULL; // LOG_DEBUG("attention_ext L_q:%d L_k:%d n_head:%d C:%d d_head:%d N:%d", L_q, L_k, n_head, C, d_head, N);
if (use_flash_attn) { // }
// is there anything oddly shaped?? ping Green-Sky if you can trip this assert
GGML_ASSERT(((L_k % 256 == 0) && L_q == L_k) || !(L_k % 256 == 0));
bool can_use_flash_attn = true;
can_use_flash_attn = can_use_flash_attn && L_k % 256 == 0;
can_use_flash_attn = can_use_flash_attn && d_head % 64 == 0; // double check
// cuda max d_head seems to be 256, cpu does seem to work with 512
can_use_flash_attn = can_use_flash_attn && d_head <= 256; // double check
if (mask != nullptr) {
// TODO(Green-Sky): figure out if we can bend t5 to work too
can_use_flash_attn = can_use_flash_attn && mask->ne[2] == 1;
can_use_flash_attn = can_use_flash_attn && mask->ne[3] == 1;
}
// TODO(Green-Sky): more pad or disable for funny tensor shapes
ggml_tensor* kqv = nullptr;
// GGML_ASSERT((flash_attn && can_use_flash_attn) || !flash_attn);
if (can_use_flash_attn && flash_attn) {
// LOG_DEBUG("using flash attention");
k = ggml_cast(ctx, k, GGML_TYPE_F16);
v = ggml_cont(ctx, ggml_permute(ctx, v, 0, 2, 1, 3)); // [N, n_head, L_k, d_head] v = ggml_cont(ctx, ggml_permute(ctx, v, 0, 2, 1, 3)); // [N, n_head, L_k, d_head]
v = ggml_reshape_3d(ctx, v, d_head, L_k, n_head * N); // [N * n_head, L_k, d_head] v = ggml_reshape_3d(ctx, v, d_head, L_k, n_head * N); // [N * n_head, L_k, d_head]
LOG_DEBUG("k->ne[1] == %d", k->ne[1]); v = ggml_cast(ctx, v, GGML_TYPE_F16);
kqv = ggml_flash_attn_ext(ctx, q, k, v, mask, scale, 0, 0); kqv = ggml_flash_attn_ext(ctx, q, k, v, mask, scale, 0, 0);
ggml_flash_attn_ext_set_prec(kqv, GGML_PREC_F32);
// kqv = ggml_view_3d(ctx, kqv, d_head, n_head, L_k, kqv->nb[1], kqv->nb[2], 0);
kqv = ggml_view_3d(ctx, kqv, d_head, n_head, L_q, kqv->nb[1], kqv->nb[2], 0);
} else { } else {
v = ggml_cont(ctx, ggml_permute(ctx, v, 1, 2, 0, 3)); // [N, n_head, d_head, L_k] v = ggml_cont(ctx, ggml_permute(ctx, v, 1, 2, 0, 3)); // [N, n_head, d_head, L_k]
v = ggml_reshape_3d(ctx, v, L_k, d_head, n_head * N); // [N * n_head, d_head, L_k] v = ggml_reshape_3d(ctx, v, L_k, d_head, n_head * N); // [N * n_head, d_head, L_k]
@ -757,10 +781,12 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_attention_ext(struct ggml_context*
kq = ggml_soft_max_inplace(ctx, kq); kq = ggml_soft_max_inplace(ctx, kq);
kqv = ggml_mul_mat(ctx, v, kq); // [N * n_head, L_q, d_head] kqv = ggml_mul_mat(ctx, v, kq); // [N * n_head, L_q, d_head]
}
kqv = ggml_reshape_4d(ctx, kqv, d_head, L_q, n_head, N); // [N, n_head, L_q, d_head] kqv = ggml_reshape_4d(ctx, kqv, d_head, L_q, n_head, N); // [N, n_head, L_q, d_head]
kqv = ggml_cont(ctx, ggml_permute(ctx, kqv, 0, 2, 1, 3)); // [N, L_q, n_head, d_head] kqv = ggml_permute(ctx, kqv, 0, 2, 1, 3); // [N, L_q, n_head, d_head]
}
kqv = ggml_cont(ctx, kqv);
kqv = ggml_reshape_3d(ctx, kqv, d_head * n_head, L_q, N); // [N, L_q, C] kqv = ggml_reshape_3d(ctx, kqv, d_head * n_head, L_q, N); // [N, L_q, C]
return kqv; return kqv;
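Distilled from the checks above, the flash-attention eligibility test reads as a single predicate; this is a sketch mirroring the patch's current conditions (the "double check" comments suggest they may still change):

    // Sketch: can this q/k/v/mask combination take the ggml_flash_attn_ext path?
    static bool attn_can_use_flash(int64_t L_k, int64_t d_head, const ggml_tensor* mask) {
        bool ok = (L_k % 256 == 0)    // kv length must be padded to a multiple of 256
               && (d_head % 64 == 0)  // head size must be a multiple of 64
               && (d_head <= 256);    // CUDA kernels top out at 256 (CPU handles more)
        if (mask) {
            // broadcast masks only; t5-style per-head masks are not supported yet
            ok = ok && mask->ne[2] == 1 && mask->ne[3] == 1;
        }
        return ok;
    }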
@ -940,7 +966,6 @@ protected:
std::map<struct ggml_tensor*, const void*> backend_tensor_data_map; std::map<struct ggml_tensor*, const void*> backend_tensor_data_map;
ggml_type wtype = GGML_TYPE_F32;
ggml_backend_t backend = NULL; ggml_backend_t backend = NULL;
void alloc_params_ctx() { void alloc_params_ctx() {
@ -1016,8 +1041,8 @@ protected:
public: public:
virtual std::string get_desc() = 0; virtual std::string get_desc() = 0;
GGMLRunner(ggml_backend_t backend, ggml_type wtype = GGML_TYPE_F32) GGMLRunner(ggml_backend_t backend)
: backend(backend), wtype(wtype) { : backend(backend) {
alloc_params_ctx(); alloc_params_ctx();
} }
@ -1048,6 +1073,11 @@ public:
params_buffer_size / (1024.0 * 1024.0), params_buffer_size / (1024.0 * 1024.0),
ggml_backend_is_cpu(backend) ? "RAM" : "VRAM", ggml_backend_is_cpu(backend) ? "RAM" : "VRAM",
num_tensors); num_tensors);
// printf("%s params backend buffer size = % 6.2f MB(%s) (%i tensors)\n",
// get_desc().c_str(),
// params_buffer_size / (1024.0 * 1024.0),
// ggml_backend_is_cpu(backend) ? "RAM" : "VRAM",
// num_tensors);
return true; return true;
} }
@ -1141,20 +1171,22 @@ protected:
GGMLBlockMap blocks; GGMLBlockMap blocks;
ParameterMap params; ParameterMap params;
void init_blocks(struct ggml_context* ctx, ggml_type wtype) { void init_blocks(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
for (auto& pair : blocks) { for (auto& pair : blocks) {
auto& block = pair.second; auto& block = pair.second;
block->init(ctx, tensor_types, prefix + pair.first);
block->init(ctx, wtype);
} }
} }
virtual void init_params(struct ggml_context* ctx, ggml_type wtype) {} virtual void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {}
public: public:
void init(struct ggml_context* ctx, ggml_type wtype) { void init(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
init_blocks(ctx, wtype); if (prefix.size() > 0) {
init_params(ctx, wtype); prefix = prefix + ".";
}
init_blocks(ctx, tensor_types, prefix);
init_params(ctx, tensor_types, prefix);
} }
size_t get_params_num() { size_t get_params_num() {
@ -1210,13 +1242,15 @@ protected:
bool bias; bool bias;
bool force_f32; bool force_f32;
void init_params(struct ggml_context* ctx, ggml_type wtype) { void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
enum ggml_type wtype = (tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
if (in_features % ggml_blck_size(wtype) != 0 || force_f32) { if (in_features % ggml_blck_size(wtype) != 0 || force_f32) {
wtype = GGML_TYPE_F32; wtype = GGML_TYPE_F32;
} }
params["weight"] = ggml_new_tensor_2d(ctx, wtype, in_features, out_features); params["weight"] = ggml_new_tensor_2d(ctx, wtype, in_features, out_features);
if (bias) { if (bias) {
params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, out_features); enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.ypes.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32;
params["bias"] = ggml_new_tensor_1d(ctx, wtype, out_features);
} }
} }
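The weight-type lookup above (find prefix + "weight" in tensor_types, else fall back) recurs in every block below; a hypothetical helper, not part of this patch, captures the pattern:

    // Resolve a parameter's stored type from the per-tensor map,
    // falling back to a default when the checkpoint does not mention it.
    static enum ggml_type lookup_wtype(std::map<std::string, enum ggml_type>& tensor_types,
                                       const std::string& prefix,
                                       const char* param,
                                       enum ggml_type fallback) {
        auto it = tensor_types.find(prefix + param);
        return it != tensor_types.end() ? it->second : fallback;
    }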
@ -1244,8 +1278,8 @@ class Embedding : public UnaryBlock {
protected: protected:
int64_t embedding_dim; int64_t embedding_dim;
int64_t num_embeddings; int64_t num_embeddings;
void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
void init_params(struct ggml_context* ctx, ggml_type wtype) { enum ggml_type wtype = (tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
params["weight"] = ggml_new_tensor_2d(ctx, wtype, embedding_dim, num_embeddings); params["weight"] = ggml_new_tensor_2d(ctx, wtype, embedding_dim, num_embeddings);
} }
@ -1284,10 +1318,12 @@ protected:
std::pair<int, int> dilation; std::pair<int, int> dilation;
bool bias; bool bias;
void init_params(struct ggml_context* ctx, ggml_type wtype) { void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
params["weight"] = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kernel_size.second, kernel_size.first, in_channels, out_channels); enum ggml_type wtype = GGML_TYPE_F16; //(tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F16;
params["weight"] = ggml_new_tensor_4d(ctx, wtype, kernel_size.second, kernel_size.first, in_channels, out_channels);
if (bias) { if (bias) {
params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, out_channels); enum ggml_type wtype = GGML_TYPE_F32; // (tensor_types.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32;
params["bias"] = ggml_new_tensor_1d(ctx, wtype, out_channels);
} }
} }
@ -1327,10 +1363,12 @@ protected:
int64_t dilation; int64_t dilation;
bool bias; bool bias;
void init_params(struct ggml_context* ctx, ggml_type wtype) { void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
params["weight"] = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, 1, kernel_size, in_channels, out_channels); // 5d => 4d enum ggml_type wtype = GGML_TYPE_F16; //(tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F16;
params["weight"] = ggml_new_tensor_4d(ctx, wtype, 1, kernel_size, in_channels, out_channels); // 5d => 4d
if (bias) { if (bias) {
params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, out_channels); enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32;
params["bias"] = ggml_new_tensor_1d(ctx, wtype, out_channels);
} }
} }
@ -1369,11 +1407,13 @@ protected:
bool elementwise_affine; bool elementwise_affine;
bool bias; bool bias;
void init_params(struct ggml_context* ctx, ggml_type wtype) { void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
if (elementwise_affine) { if (elementwise_affine) {
params["weight"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, normalized_shape); enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.ypes.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
params["weight"] = ggml_new_tensor_1d(ctx, wtype, normalized_shape);
if (bias) { if (bias) {
params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, normalized_shape); enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.ypes.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32;
params["bias"] = ggml_new_tensor_1d(ctx, wtype, normalized_shape);
} }
} }
} }
@ -1409,10 +1449,12 @@ protected:
float eps; float eps;
bool affine; bool affine;
void init_params(struct ggml_context* ctx, ggml_type wtype) { void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
if (affine) { if (affine) {
params["weight"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, num_channels); enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, num_channels); enum ggml_type bias_wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32;
params["weight"] = ggml_new_tensor_1d(ctx, wtype, num_channels);
params["bias"] = ggml_new_tensor_1d(ctx, bias_wtype, num_channels);
} }
} }


@ -16,10 +16,9 @@ struct LoraModel : public GGMLRunner {
ggml_tensor* zero_index = NULL; ggml_tensor* zero_index = NULL;
LoraModel(ggml_backend_t backend, LoraModel(ggml_backend_t backend,
ggml_type wtype,
const std::string& file_path = "", const std::string& file_path = "",
const std::string& prefix = "") const std::string prefix = "")
: file_path(file_path), GGMLRunner(backend, wtype) { : file_path(file_path), GGMLRunner(backend) {
if (!model_loader.init_from_file(file_path, prefix)) { if (!model_loader.init_from_file(file_path, prefix)) {
load_failed = true; load_failed = true;
} }


@ -114,9 +114,15 @@ struct SDParams {
bool normalize_input = false; bool normalize_input = false;
bool clip_on_cpu = false; bool clip_on_cpu = false;
bool vae_on_cpu = false; bool vae_on_cpu = false;
bool diffusion_flash_attn = false;
bool canny_preprocess = false; bool canny_preprocess = false;
bool color = false; bool color = false;
int upscale_repeats = 1; int upscale_repeats = 1;
std::vector<int> skip_layers = {7, 8, 9};
float slg_scale = 0.;
float skip_layer_start = 0.01;
float skip_layer_end = 0.2;
}; };
void print_params(SDParams params) { void print_params(SDParams params) {
@ -144,11 +150,13 @@ void print_params(SDParams params) {
printf(" clip on cpu: %s\n", params.clip_on_cpu ? "true" : "false"); printf(" clip on cpu: %s\n", params.clip_on_cpu ? "true" : "false");
printf(" controlnet cpu: %s\n", params.control_net_cpu ? "true" : "false"); printf(" controlnet cpu: %s\n", params.control_net_cpu ? "true" : "false");
printf(" vae decoder on cpu:%s\n", params.vae_on_cpu ? "true" : "false"); printf(" vae decoder on cpu:%s\n", params.vae_on_cpu ? "true" : "false");
printf(" diffusion flash attention:%s\n", params.diffusion_flash_attn ? "true" : "false");
printf(" strength(control): %.2f\n", params.control_strength); printf(" strength(control): %.2f\n", params.control_strength);
printf(" prompt: %s\n", params.prompt.c_str()); printf(" prompt: %s\n", params.prompt.c_str());
printf(" negative_prompt: %s\n", params.negative_prompt.c_str()); printf(" negative_prompt: %s\n", params.negative_prompt.c_str());
printf(" min_cfg: %.2f\n", params.min_cfg); printf(" min_cfg: %.2f\n", params.min_cfg);
printf(" cfg_scale: %.2f\n", params.cfg_scale); printf(" cfg_scale: %.2f\n", params.cfg_scale);
printf(" slg_scale: %.2f\n", params.slg_scale);
printf(" guidance: %.2f\n", params.guidance); printf(" guidance: %.2f\n", params.guidance);
printf(" clip_skip: %d\n", params.clip_skip); printf(" clip_skip: %d\n", params.clip_skip);
printf(" width: %d\n", params.width); printf(" width: %d\n", params.width);
@ -175,7 +183,7 @@ void print_usage(int argc, const char* argv[]) {
printf(" -m, --model [MODEL] path to full model\n"); printf(" -m, --model [MODEL] path to full model\n");
printf(" --diffusion-model path to the standalone diffusion model\n"); printf(" --diffusion-model path to the standalone diffusion model\n");
printf(" --clip_l path to the clip-l text encoder\n"); printf(" --clip_l path to the clip-l text encoder\n");
printf(" --clip_g path to the clip-l text encoder\n"); printf(" --clip_g path to the clip-g text encoder\n");
printf(" --t5xxl path to the the t5xxl text encoder\n"); printf(" --t5xxl path to the the t5xxl text encoder\n");
printf(" --vae [VAE] path to vae\n"); printf(" --vae [VAE] path to vae\n");
printf(" --taesd [TAESD_PATH] path to taesd. Using Tiny AutoEncoder for fast decoding (low quality)\n"); printf(" --taesd [TAESD_PATH] path to taesd. Using Tiny AutoEncoder for fast decoding (low quality)\n");
@ -186,7 +194,7 @@ void print_usage(int argc, const char* argv[]) {
printf(" --normalize-input normalize PHOTOMAKER input id images\n"); printf(" --normalize-input normalize PHOTOMAKER input id images\n");
printf(" --upscale-model [ESRGAN_PATH] path to esrgan model. Upscale images after generate, just RealESRGAN_x4plus_anime_6B supported by now\n"); printf(" --upscale-model [ESRGAN_PATH] path to esrgan model. Upscale images after generate, just RealESRGAN_x4plus_anime_6B supported by now\n");
printf(" --upscale-repeats Run the ESRGAN upscaler this many times (default 1)\n"); printf(" --upscale-repeats Run the ESRGAN upscaler this many times (default 1)\n");
printf(" --type [TYPE] weight type (f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_k, q3_k, q4_k)\n"); printf(" --type [TYPE] weight type (examples: f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_K, q3_K, q4_K)\n");
printf(" If not specified, the default is the type of the weight file\n"); printf(" If not specified, the default is the type of the weight file\n");
printf(" --lora-model-dir [DIR] lora model directory\n"); printf(" --lora-model-dir [DIR] lora model directory\n");
printf(" -i, --init-img [IMAGE] path to the input image, required by img2img\n"); printf(" -i, --init-img [IMAGE] path to the input image, required by img2img\n");
@ -195,6 +203,12 @@ void print_usage(int argc, const char* argv[]) {
printf(" -p, --prompt [PROMPT] the prompt to render\n"); printf(" -p, --prompt [PROMPT] the prompt to render\n");
printf(" -n, --negative-prompt PROMPT the negative prompt (default: \"\")\n"); printf(" -n, --negative-prompt PROMPT the negative prompt (default: \"\")\n");
printf(" --cfg-scale SCALE unconditional guidance scale: (default: 7.0)\n"); printf(" --cfg-scale SCALE unconditional guidance scale: (default: 7.0)\n");
printf(" --slg-scale SCALE skip layer guidance (SLG) scale, only for DiT models: (default: 0)\n");
printf(" 0 means disabled, a value of 2.5 is nice for sd3.5 medium\n");
printf(" --skip_layers LAYERS Layers to skip for SLG steps: (default: [7,8,9])\n");
printf(" --skip_layer_start START SLG enabling point: (default: 0.01)\n");
printf(" --skip_layer_end END SLG disabling point: (default: 0.2)\n");
printf(" SLG will be enabled at step int([STEPS]*[START]) and disabled at int([STEPS]*[END])\n");
printf(" --strength STRENGTH strength for noising/unnoising (default: 0.75)\n"); printf(" --strength STRENGTH strength for noising/unnoising (default: 0.75)\n");
printf(" --style-ratio STYLE-RATIO strength for keeping input identity (default: 20%%)\n"); printf(" --style-ratio STYLE-RATIO strength for keeping input identity (default: 20%%)\n");
printf(" --control-strength STRENGTH strength to apply Control Net (default: 0.9)\n"); printf(" --control-strength STRENGTH strength to apply Control Net (default: 0.9)\n");
@ -213,6 +227,9 @@ void print_usage(int argc, const char* argv[]) {
printf(" --vae-tiling process vae in tiles to reduce memory usage\n"); printf(" --vae-tiling process vae in tiles to reduce memory usage\n");
printf(" --vae-on-cpu keep vae in cpu (for low vram)\n"); printf(" --vae-on-cpu keep vae in cpu (for low vram)\n");
printf(" --clip-on-cpu keep clip in cpu (for low vram)\n"); printf(" --clip-on-cpu keep clip in cpu (for low vram)\n");
printf(" --diffusion-fa use flash attention in the diffusion model (for low vram)\n");
printf(" Might lower quality, since it implies converting k and v to f16.\n");
printf(" This might crash if it is not supported by the backend.\n");
printf(" --control-net-cpu keep controlnet in cpu (for low vram)\n"); printf(" --control-net-cpu keep controlnet in cpu (for low vram)\n");
printf(" --canny apply canny preprocessor (edge detection)\n"); printf(" --canny apply canny preprocessor (edge detection)\n");
printf(" --color Colors the logging tags according to level\n"); printf(" --color Colors the logging tags according to level\n");
@ -328,29 +345,29 @@ void parse_args(int argc, const char** argv, SDParams& params) {
break; break;
} }
std::string type = argv[i]; std::string type = argv[i];
if (type == "f32") { bool found = false;
params.wtype = SD_TYPE_F32; std::string valid_types = "";
} else if (type == "f16") { for (size_t i = 0; i < SD_TYPE_COUNT; i++) {
params.wtype = SD_TYPE_F16; auto trait = ggml_get_type_traits((ggml_type)i);
} else if (type == "q4_0") { std::string name(trait->type_name);
params.wtype = SD_TYPE_Q4_0; if (name == "f32" || (trait->to_float && trait->type_size)) {
} else if (type == "q4_1") { if (i)
params.wtype = SD_TYPE_Q4_1; valid_types += ", ";
} else if (type == "q5_0") { valid_types += name;
params.wtype = SD_TYPE_Q5_0; if (type == name) {
} else if (type == "q5_1") { if (ggml_quantize_requires_imatrix((ggml_type)i)) {
params.wtype = SD_TYPE_Q5_1; printf("\033[35;1m[WARNING]\033[0m: type %s requires imatrix to work properly. A dummy imatrix will be used, expect poor quality.\n", trait->type_name);
} else if (type == "q8_0") { }
params.wtype = SD_TYPE_Q8_0; params.wtype = (enum sd_type_t)i;
} else if (type == "q2_k") { found = true;
params.wtype = SD_TYPE_Q2_K; break;
} else if (type == "q3_k") { }
params.wtype = SD_TYPE_Q3_K; }
} else if (type == "q4_k") { }
params.wtype = SD_TYPE_Q4_K; if (!found) {
} else { fprintf(stderr, "error: invalid weight format %s, must be one of [%s]\n",
fprintf(stderr, "error: invalid weight format %s, must be one of [f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_k, q3_k, q4_k]\n", type.c_str(),
type.c_str()); valid_types.c_str());
exit(1); exit(1);
} }
} else if (arg == "--lora-model-dir") { } else if (arg == "--lora-model-dir") {
@ -463,6 +480,8 @@ void parse_args(int argc, const char** argv, SDParams& params) {
params.clip_on_cpu = true; // will slow down get_learned_condition but necessary for low MEM GPUs params.clip_on_cpu = true; // will slow down get_learned_condition but necessary for low MEM GPUs
} else if (arg == "--vae-on-cpu") { } else if (arg == "--vae-on-cpu") {
params.vae_on_cpu = true; // will slow down latent decoding but necessary for low MEM GPUs params.vae_on_cpu = true; // will slow down latent decoding but necessary for low MEM GPUs
} else if (arg == "--diffusion-fa") {
params.diffusion_flash_attn = true; // can reduce MEM significantly
} else if (arg == "--canny") { } else if (arg == "--canny") {
params.canny_preprocess = true; params.canny_preprocess = true;
} else if (arg == "-b" || arg == "--batch-count") { } else if (arg == "-b" || arg == "--batch-count") {
@ -532,6 +551,61 @@ void parse_args(int argc, const char** argv, SDParams& params) {
params.verbose = true; params.verbose = true;
} else if (arg == "--color") { } else if (arg == "--color") {
params.color = true; params.color = true;
} else if (arg == "--slg-scale") {
if (++i >= argc) {
invalid_arg = true;
break;
}
params.slg_scale = std::stof(argv[i]);
} else if (arg == "--skip-layers") {
if (++i >= argc) {
invalid_arg = true;
break;
}
if (argv[i][0] != '[') {
invalid_arg = true;
break;
}
std::string layers_str = argv[i];
while (layers_str.back() != ']') {
if (++i >= argc) {
invalid_arg = true;
break;
}
layers_str += " " + std::string(argv[i]);
}
layers_str = layers_str.substr(1, layers_str.size() - 2);
std::regex regex("[, ]+");
std::sregex_token_iterator iter(layers_str.begin(), layers_str.end(), regex, -1);
std::sregex_token_iterator end;
std::vector<std::string> tokens(iter, end);
std::vector<int> layers;
for (const auto& token : tokens) {
try {
layers.push_back(std::stoi(token));
} catch (const std::invalid_argument& e) {
invalid_arg = true;
break;
}
}
params.skip_layers = layers;
if (invalid_arg) {
break;
}
} else if (arg == "--skip-layer-start") {
if (++i >= argc) {
invalid_arg = true;
break;
}
params.skip_layer_start = std::stof(argv[i]);
} else if (arg == "--skip-layer-end") {
if (++i >= argc) {
invalid_arg = true;
break;
}
params.skip_layer_end = std::stof(argv[i]);
} else { } else {
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
print_usage(argc, argv); print_usage(argc, argv);
@ -622,6 +696,16 @@ std::string get_image_params(SDParams params, int64_t seed) {
} }
parameter_string += "Steps: " + std::to_string(params.sample_steps) + ", "; parameter_string += "Steps: " + std::to_string(params.sample_steps) + ", ";
parameter_string += "CFG scale: " + std::to_string(params.cfg_scale) + ", "; parameter_string += "CFG scale: " + std::to_string(params.cfg_scale) + ", ";
if (params.slg_scale != 0 && params.skip_layers.size() != 0) {
parameter_string += "SLG scale: " + std::to_string(params.cfg_scale) + ", ";
parameter_string += "Skip layers: [";
for (const auto& layer : params.skip_layers) {
parameter_string += std::to_string(layer) + ", ";
}
parameter_string += "], ";
parameter_string += "Skip layer start: " + std::to_string(params.skip_layer_start) + ", ";
parameter_string += "Skip layer end: " + std::to_string(params.skip_layer_end) + ", ";
}
parameter_string += "Guidance: " + std::to_string(params.guidance) + ", "; parameter_string += "Guidance: " + std::to_string(params.guidance) + ", ";
parameter_string += "Seed: " + std::to_string(seed) + ", "; parameter_string += "Seed: " + std::to_string(seed) + ", ";
parameter_string += "Size: " + std::to_string(params.width) + "x" + std::to_string(params.height) + ", "; parameter_string += "Size: " + std::to_string(params.width) + "x" + std::to_string(params.height) + ", ";
@ -679,7 +763,6 @@ void sd_log_cb(enum sd_log_level_t level, const char* log, void* data) {
fflush(out_stream); fflush(out_stream);
} }
//concedo notes: if it crashes, make sure you specify --type!
int main(int argc, const char* argv[]) { int main(int argc, const char* argv[]) {
SDParams params; SDParams params;
@ -790,7 +873,8 @@ int main(int argc, const char* argv[]) {
params.schedule, params.schedule,
params.clip_on_cpu, params.clip_on_cpu,
params.control_net_cpu, params.control_net_cpu,
params.vae_on_cpu); params.vae_on_cpu,
params.diffusion_flash_attn);
if (sd_ctx == NULL) { if (sd_ctx == NULL) {
printf("new_sd_ctx_t failed\n"); printf("new_sd_ctx_t failed\n");
@ -839,7 +923,12 @@ int main(int argc, const char* argv[]) {
params.control_strength, params.control_strength,
params.style_ratio, params.style_ratio,
params.normalize_input, params.normalize_input,
params.input_id_images_path.c_str()); params.input_id_images_path.c_str(),
params.skip_layers.data(),
params.skip_layers.size(),
params.slg_scale,
params.skip_layer_start,
params.skip_layer_end);
} else { } else {
sd_image_t input_image = {(uint32_t)params.width, sd_image_t input_image = {(uint32_t)params.width,
(uint32_t)params.height, (uint32_t)params.height,
@ -901,7 +990,12 @@ int main(int argc, const char* argv[]) {
params.control_strength, params.control_strength,
params.style_ratio, params.style_ratio,
params.normalize_input, params.normalize_input,
params.input_id_images_path.c_str()); params.input_id_images_path.c_str(),
params.skip_layers.data(),
params.skip_layers.size(),
params.slg_scale,
params.skip_layer_start,
params.skip_layer_end);
} }
} }
@ -914,8 +1008,7 @@ int main(int argc, const char* argv[]) {
int upscale_factor = 4; // unused for RealESRGAN_x4plus_anime_6B.pth int upscale_factor = 4; // unused for RealESRGAN_x4plus_anime_6B.pth
if (params.esrgan_path.size() > 0 && params.upscale_repeats > 0) { if (params.esrgan_path.size() > 0 && params.upscale_repeats > 0) {
upscaler_ctx_t* upscaler_ctx = new_upscaler_ctx(params.esrgan_path.c_str(), upscaler_ctx_t* upscaler_ctx = new_upscaler_ctx(params.esrgan_path.c_str(),
params.n_threads, params.n_threads);
params.wtype);
if (upscaler_ctx == NULL) { if (upscaler_ctx == NULL) {
printf("new_upscaler_ctx failed\n"); printf("new_upscaler_ctx failed\n");


@ -147,8 +147,9 @@ protected:
int64_t hidden_size; int64_t hidden_size;
float eps; float eps;
void init_params(struct ggml_context* ctx, ggml_type wtype) { void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
params["weight"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hidden_size); enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
params["weight"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
} }
public: public:
@ -636,7 +637,6 @@ public:
struct MMDiT : public GGMLBlock { struct MMDiT : public GGMLBlock {
// Diffusion model with a Transformer backbone. // Diffusion model with a Transformer backbone.
protected: protected:
SDVersion version = VERSION_SD3_2B;
int64_t input_size = -1; int64_t input_size = -1;
int64_t patch_size = 2; int64_t patch_size = 2;
int64_t in_channels = 16; int64_t in_channels = 16;
@ -652,13 +652,13 @@ protected:
int64_t hidden_size; int64_t hidden_size;
std::string qk_norm; std::string qk_norm;
void init_params(struct ggml_context* ctx, ggml_type wtype) { void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
params["pos_embed"] = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, hidden_size, num_patchs, 1); enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "pos_embed") != tensor_types.end()) ? tensor_types[prefix + "pos_embed"] : GGML_TYPE_F32;
params["pos_embed"] = ggml_new_tensor_3d(ctx, wtype, hidden_size, num_patchs, 1);
} }
public: public:
MMDiT(SDVersion version = VERSION_SD3_2B) MMDiT(std::map<std::string, enum ggml_type>& tensor_types) {
: version(version) {
// input_size is always None // input_size is always None
// learn_sigma is always False // learn_sigma is always False
// register_length is always 0 // register_length is always 0
@ -670,48 +670,44 @@ public:
// pos_embed_scaling_factor is not used // pos_embed_scaling_factor is not used
// pos_embed_offset is not used // pos_embed_offset is not used
// context_embedder_config is always {'target': 'torch.nn.Linear', 'params': {'in_features': 4096, 'out_features': 1536}} // context_embedder_config is always {'target': 'torch.nn.Linear', 'params': {'in_features': 4096, 'out_features': 1536}}
if (version == VERSION_SD3_2B) {
input_size = -1; // read tensors from tensor_types
patch_size = 2; for (auto pair : tensor_types) {
in_channels = 16; std::string tensor_name = pair.first;
depth = 24; if (tensor_name.find("model.diffusion_model.") == std::string::npos)
mlp_ratio = 4.0f; continue;
adm_in_channels = 2048; size_t jb = tensor_name.find("joint_blocks.");
out_channels = 16; if (jb != std::string::npos) {
pos_embed_max_size = 192; tensor_name = tensor_name.substr(jb); // remove prefix
num_patchs = 36864; // 192 * 192 int block_depth = atoi(tensor_name.substr(13, tensor_name.find(".", 13)).c_str());
context_size = 4096; if (block_depth + 1 > depth) {
context_embedder_out_dim = 1536; depth = block_depth + 1;
} else if (version == VERSION_SD3_5_8B) { }
input_size = -1; if (tensor_name.find("attn.ln") != std::string::npos) {
patch_size = 2; if (tensor_name.find(".bias") != std::string::npos) {
in_channels = 16; qk_norm = "ln";
depth = 38; } else {
mlp_ratio = 4.0f;
adm_in_channels = 2048;
out_channels = 16;
pos_embed_max_size = 192;
num_patchs = 36864; // 192 * 192
context_size = 4096;
context_embedder_out_dim = 2432;
qk_norm = "rms";
} else if (version == VERSION_SD3_5_2B) {
input_size = -1;
patch_size = 2;
in_channels = 16;
depth = 24;
d_self = 12;
mlp_ratio = 4.0f;
adm_in_channels = 2048;
out_channels = 16;
pos_embed_max_size = 384;
num_patchs = 147456;
context_size = 4096;
context_embedder_out_dim = 1536;
qk_norm = "rms"; qk_norm = "rms";
} }
}
if (tensor_name.find("attn2") != std::string::npos) {
if (block_depth > d_self) {
d_self = block_depth;
}
}
}
}
if (d_self >= 0) {
pos_embed_max_size *= 2;
num_patchs *= 4;
}
LOG_INFO("MMDiT layers: %d (including %d MMDiT-x layers)", depth, d_self + 1);
int64_t default_out_channels = in_channels; int64_t default_out_channels = in_channels;
hidden_size = 64 * depth; hidden_size = 64 * depth;
context_embedder_out_dim = 64 * depth;
int64_t num_heads = depth; int64_t num_heads = depth;
blocks["x_embedder"] = std::shared_ptr<GGMLBlock>(new PatchEmbed(input_size, patch_size, in_channels, hidden_size, true)); blocks["x_embedder"] = std::shared_ptr<GGMLBlock>(new PatchEmbed(input_size, patch_size, in_channels, hidden_size, true));
@ -801,7 +797,8 @@ public:
struct ggml_tensor* forward_core_with_concat(struct ggml_context* ctx, struct ggml_tensor* forward_core_with_concat(struct ggml_context* ctx,
struct ggml_tensor* x, struct ggml_tensor* x,
struct ggml_tensor* c_mod, struct ggml_tensor* c_mod,
struct ggml_tensor* context) { struct ggml_tensor* context,
std::vector<int> skip_layers = std::vector<int>()) {
// x: [N, H*W, hidden_size] // x: [N, H*W, hidden_size]
// context: [N, n_context, d_context] // context: [N, n_context, d_context]
// c: [N, hidden_size] // c: [N, hidden_size]
@ -809,6 +806,11 @@ public:
auto final_layer = std::dynamic_pointer_cast<FinalLayer>(blocks["final_layer"]); auto final_layer = std::dynamic_pointer_cast<FinalLayer>(blocks["final_layer"]);
for (int i = 0; i < depth; i++) { for (int i = 0; i < depth; i++) {
// skip iteration if i is in skip_layers
if (skip_layers.size() > 0 && std::find(skip_layers.begin(), skip_layers.end(), i) != skip_layers.end()) {
continue;
}
auto block = std::dynamic_pointer_cast<JointBlock>(blocks["joint_blocks." + std::to_string(i)]); auto block = std::dynamic_pointer_cast<JointBlock>(blocks["joint_blocks." + std::to_string(i)]);
auto context_x = block->forward(ctx, context, x, c_mod); auto context_x = block->forward(ctx, context, x, c_mod);
@ -825,7 +827,8 @@ public:
struct ggml_tensor* x, struct ggml_tensor* x,
struct ggml_tensor* t, struct ggml_tensor* t,
struct ggml_tensor* y = NULL, struct ggml_tensor* y = NULL,
struct ggml_tensor* context = NULL) { struct ggml_tensor* context = NULL,
std::vector<int> skip_layers = std::vector<int>()) {
// Forward pass of DiT. // Forward pass of DiT.
// x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images) // x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
// t: (N,) tensor of diffusion timesteps // t: (N,) tensor of diffusion timesteps
@ -856,22 +859,23 @@ public:
context = context_embedder->forward(ctx, context); // [N, L, D] aka [N, L, 1536] context = context_embedder->forward(ctx, context); // [N, L, D] aka [N, L, 1536]
} }
x = forward_core_with_concat(ctx, x, c, context); // (N, H*W, patch_size ** 2 * out_channels) x = forward_core_with_concat(ctx, x, c, context, skip_layers); // (N, H*W, patch_size ** 2 * out_channels)
x = unpatchify(ctx, x, h, w); // [N, C, H, W] x = unpatchify(ctx, x, h, w); // [N, C, H, W]
return x; return x;
} }
}; };
struct MMDiTRunner : public GGMLRunner { struct MMDiTRunner : public GGMLRunner {
MMDiT mmdit; MMDiT mmdit;
static std::map<std::string, enum ggml_type> empty_tensor_types;
MMDiTRunner(ggml_backend_t backend, MMDiTRunner(ggml_backend_t backend,
ggml_type wtype, std::map<std::string, enum ggml_type>& tensor_types = empty_tensor_types,
SDVersion version = VERSION_SD3_2B) const std::string prefix = "")
: GGMLRunner(backend, wtype), mmdit(version) { : GGMLRunner(backend), mmdit(tensor_types) {
mmdit.init(params_ctx, wtype); mmdit.init(params_ctx, tensor_types, prefix);
} }
std::string get_desc() { std::string get_desc() {
@ -885,7 +889,8 @@ struct MMDiTRunner : public GGMLRunner {
struct ggml_cgraph* build_graph(struct ggml_tensor* x, struct ggml_cgraph* build_graph(struct ggml_tensor* x,
struct ggml_tensor* timesteps, struct ggml_tensor* timesteps,
struct ggml_tensor* context, struct ggml_tensor* context,
struct ggml_tensor* y) { struct ggml_tensor* y,
std::vector<int> skip_layers = std::vector<int>()) {
struct ggml_cgraph* gf = ggml_new_graph_custom(compute_ctx, MMDIT_GRAPH_SIZE, false); struct ggml_cgraph* gf = ggml_new_graph_custom(compute_ctx, MMDIT_GRAPH_SIZE, false);
x = to_backend(x); x = to_backend(x);
@ -897,7 +902,8 @@ struct MMDiTRunner : public GGMLRunner {
x, x,
timesteps, timesteps,
y, y,
context); context,
skip_layers);
ggml_build_forward_expand(gf, out); ggml_build_forward_expand(gf, out);
@ -910,13 +916,14 @@ struct MMDiTRunner : public GGMLRunner {
struct ggml_tensor* context, struct ggml_tensor* context,
struct ggml_tensor* y, struct ggml_tensor* y,
struct ggml_tensor** output = NULL, struct ggml_tensor** output = NULL,
struct ggml_context* output_ctx = NULL) { struct ggml_context* output_ctx = NULL,
std::vector<int> skip_layers = std::vector<int>()) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
// context: [N, max_position, hidden_size]([N, 154, 4096]) or [1, max_position, hidden_size] // context: [N, max_position, hidden_size]([N, 154, 4096]) or [1, max_position, hidden_size]
// y: [N, adm_in_channels] or [1, adm_in_channels] // y: [N, adm_in_channels] or [1, adm_in_channels]
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> struct ggml_cgraph* {
return build_graph(x, timesteps, context, y); return build_graph(x, timesteps, context, y, skip_layers);
}; };
GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx); GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
@ -965,7 +972,7 @@ struct MMDiTRunner : public GGMLRunner {
// ggml_backend_t backend = ggml_backend_cuda_init(0); // ggml_backend_t backend = ggml_backend_cuda_init(0);
ggml_backend_t backend = ggml_backend_cpu_init(); ggml_backend_t backend = ggml_backend_cpu_init();
ggml_type model_data_type = GGML_TYPE_F16; ggml_type model_data_type = GGML_TYPE_F16;
std::shared_ptr<MMDiTRunner> mmdit = std::shared_ptr<MMDiTRunner>(new MMDiTRunner(backend, model_data_type)); std::shared_ptr<MMDiTRunner> mmdit = std::shared_ptr<MMDiTRunner>(new MMDiTRunner(backend));
{ {
LOG_INFO("loading from '%s'", file_path.c_str()); LOG_INFO("loading from '%s'", file_path.c_str());


@ -13,8 +13,8 @@
#include "ggml-alloc.h" #include "ggml-alloc.h"
#include "ggml-backend.h" #include "ggml-backend.h"
#include "ggml.h"
#include "ggml-cpu.h" #include "ggml-cpu.h"
#include "ggml.h"
#include "stable-diffusion.h" #include "stable-diffusion.h"
@ -147,6 +147,33 @@ std::unordered_map<std::string, std::string> vae_decoder_name_map = {
{"first_stage_model.decoder.mid.attn_1.to_v.weight", "first_stage_model.decoder.mid.attn_1.v.weight"}, {"first_stage_model.decoder.mid.attn_1.to_v.weight", "first_stage_model.decoder.mid.attn_1.v.weight"},
}; };
std::unordered_map<std::string, std::string> pmid_v2_name_map = {
{"pmid.qformer_perceiver.perceiver_resampler.layers.0.1.1.weight",
"pmid.qformer_perceiver.perceiver_resampler.layers.0.1.1.fc1.weight"},
{"pmid.qformer_perceiver.perceiver_resampler.layers.0.1.3.weight",
"pmid.qformer_perceiver.perceiver_resampler.layers.0.1.1.fc2.weight"},
{"pmid.qformer_perceiver.perceiver_resampler.layers.1.1.1.weight",
"pmid.qformer_perceiver.perceiver_resampler.layers.1.1.1.fc1.weight"},
{"pmid.qformer_perceiver.perceiver_resampler.layers.1.1.3.weight",
"pmid.qformer_perceiver.perceiver_resampler.layers.1.1.1.fc2.weight"},
{"pmid.qformer_perceiver.perceiver_resampler.layers.2.1.1.weight",
"pmid.qformer_perceiver.perceiver_resampler.layers.2.1.1.fc1.weight"},
{"pmid.qformer_perceiver.perceiver_resampler.layers.2.1.3.weight",
"pmid.qformer_perceiver.perceiver_resampler.layers.2.1.1.fc2.weight"},
{"pmid.qformer_perceiver.perceiver_resampler.layers.3.1.1.weight",
"pmid.qformer_perceiver.perceiver_resampler.layers.3.1.1.fc1.weight"},
{"pmid.qformer_perceiver.perceiver_resampler.layers.3.1.3.weight",
"pmid.qformer_perceiver.perceiver_resampler.layers.3.1.1.fc2.weight"},
{"pmid.qformer_perceiver.token_proj.0.bias",
"pmid.qformer_perceiver.token_proj.fc1.bias"},
{"pmid.qformer_perceiver.token_proj.2.bias",
"pmid.qformer_perceiver.token_proj.fc2.bias"},
{"pmid.qformer_perceiver.token_proj.0.weight",
"pmid.qformer_perceiver.token_proj.fc1.weight"},
{"pmid.qformer_perceiver.token_proj.2.weight",
"pmid.qformer_perceiver.token_proj.fc2.weight"},
};
std::string convert_open_clip_to_hf_clip(const std::string& name) { std::string convert_open_clip_to_hf_clip(const std::string& name) {
std::string new_name = name; std::string new_name = name;
std::string prefix; std::string prefix;
@ -213,6 +240,13 @@ std::string convert_vae_decoder_name(const std::string& name) {
return name; return name;
} }
std::string convert_pmid_v2_name(const std::string& name) {
if (pmid_v2_name_map.find(name) != pmid_v2_name_map.end()) {
return pmid_v2_name_map[name];
}
return name;
}
/* If not a SDXL LoRA the "unet" prefix will have already been replaced by this /* If not a SDXL LoRA the "unet" prefix will have already been replaced by this
* point, and "te2" and "te1" don't seem to appear in non-SDXL, only "te_" */ * point, and "te2" and "te1" don't seem to appear in non-SDXL, only "te_" */
std::string convert_sdxl_lora_name(std::string tensor_name) { std::string convert_sdxl_lora_name(std::string tensor_name) {
@ -444,6 +478,8 @@ std::string convert_tensor_name(std::string name) {
new_name = convert_open_clip_to_hf_clip(name); new_name = convert_open_clip_to_hf_clip(name);
} else if (starts_with(name, "first_stage_model.decoder")) { } else if (starts_with(name, "first_stage_model.decoder")) {
new_name = convert_vae_decoder_name(name); new_name = convert_vae_decoder_name(name);
} else if (starts_with(name, "pmid.qformer_perceiver")) {
new_name = convert_pmid_v2_name(name);
} else if (starts_with(name, "control_model.")) { // for controlnet pth models } else if (starts_with(name, "control_model.")) { // for controlnet pth models
size_t pos = name.find('.'); size_t pos = name.find('.');
if (pos != std::string::npos) { if (pos != std::string::npos) {
@ -615,7 +651,6 @@ uint16_t f8_e4m3_to_f16(uint8_t f8) {
return ggml_fp32_to_fp16(*reinterpret_cast<const float*>(&result)); return ggml_fp32_to_fp16(*reinterpret_cast<const float*>(&result));
} }
uint16_t f8_e5m2_to_f16(uint8_t fp8) { uint16_t f8_e5m2_to_f16(uint8_t fp8) {
uint8_t sign = (fp8 >> 7) & 0x1; uint8_t sign = (fp8 >> 7) & 0x1;
uint8_t exponent = (fp8 >> 2) & 0x1F; uint8_t exponent = (fp8 >> 2) & 0x1F;
@ -852,13 +887,11 @@ bool ModelLoader::init_from_file(const std::string& file_path, const std::string
} else if (is_safetensors_file(file_path)) { } else if (is_safetensors_file(file_path)) {
LOG_INFO("load %s using safetensors format", file_path.c_str()); LOG_INFO("load %s using safetensors format", file_path.c_str());
return init_from_safetensors_file(file_path, prefix); return init_from_safetensors_file(file_path, prefix);
}
//disable ckpt loading //disable ckpt loading
// else if (is_zip_file(file_path)) { // } else if (is_zip_file(file_path)) {
// LOG_INFO("load %s using checkpoint format", file_path.c_str()); // LOG_INFO("load %s using checkpoint format", file_path.c_str());
// return init_from_ckpt_file(file_path, prefix); // return init_from_ckpt_file(file_path, prefix);
// } } else {
else {
LOG_WARN("unknown format %s", file_path.c_str()); LOG_WARN("unknown format %s", file_path.c_str());
return false; return false;
} }
@ -895,6 +928,7 @@ bool ModelLoader::init_from_gguf_file(const std::string& file_path, const std::s
GGML_ASSERT(ggml_nbytes(dummy) == tensor_storage.nbytes()); GGML_ASSERT(ggml_nbytes(dummy) == tensor_storage.nbytes());
tensor_storages.push_back(tensor_storage); tensor_storages.push_back(tensor_storage);
tensor_storages_types[tensor_storage.name] = tensor_storage.type;
} }
gguf_free(ctx_gguf_); gguf_free(ctx_gguf_);
@ -1039,6 +1073,7 @@ bool ModelLoader::init_from_safetensors_file(const std::string& file_path, const
} }
tensor_storages.push_back(tensor_storage); tensor_storages.push_back(tensor_storage);
tensor_storages_types[tensor_storage.name] = tensor_storage.type;
// LOG_DEBUG("%s %s", tensor_storage.to_string().c_str(), dtype.c_str()); // LOG_DEBUG("%s %s", tensor_storage.to_string().c_str(), dtype.c_str());
} }
@ -1264,7 +1299,7 @@ bool ModelLoader::parse_data_pkl(uint8_t* buffer,
zip_t* zip, zip_t* zip,
std::string dir, std::string dir,
size_t file_index, size_t file_index,
const std::string& prefix) { const std::string prefix) {
uint8_t* buffer_end = buffer + buffer_size; uint8_t* buffer_end = buffer + buffer_size;
if (buffer[0] == 0x80) { // proto if (buffer[0] == 0x80) { // proto
if (buffer[1] != 2) { if (buffer[1] != 2) {
@ -1366,9 +1401,11 @@ bool ModelLoader::parse_data_pkl(uint8_t* buffer,
reader.tensor_storage.reverse_ne(); reader.tensor_storage.reverse_ne();
reader.tensor_storage.file_index = file_index; reader.tensor_storage.file_index = file_index;
// if(strcmp(prefix.c_str(), "scarlett") == 0) // if(strcmp(prefix.c_str(), "scarlett") == 0)
// printf(" got tensor %s \n ", reader.tensor_storage.name.c_str()); // printf(" ZIP got tensor %s \n ", reader.tensor_storage.name.c_str());
reader.tensor_storage.name = prefix + reader.tensor_storage.name; reader.tensor_storage.name = prefix + reader.tensor_storage.name;
tensor_storages.push_back(reader.tensor_storage); tensor_storages.push_back(reader.tensor_storage);
tensor_storages_types[reader.tensor_storage.name] = reader.tensor_storage.type;
// LOG_DEBUG("%s", reader.tensor_storage.name.c_str()); // LOG_DEBUG("%s", reader.tensor_storage.name.c_str());
// reset // reset
reader = PickleTensorReader(); reader = PickleTensorReader();
@ -1403,6 +1440,7 @@ bool ModelLoader::init_from_ckpt_file(const std::string& file_path, const std::s
size_t pos = name.find("data.pkl"); size_t pos = name.find("data.pkl");
if (pos != std::string::npos) { if (pos != std::string::npos) {
std::string dir = name.substr(0, pos); std::string dir = name.substr(0, pos);
printf("ZIP %d, name = %s, dir = %s \n", i, name.c_str(), dir.c_str());
void* pkl_data = NULL; void* pkl_data = NULL;
size_t pkl_size; size_t pkl_size;
zip_entry_read(zip, &pkl_data, &pkl_size); zip_entry_read(zip, &pkl_data, &pkl_size);
@ -1432,23 +1470,12 @@ bool ModelLoader::has_diffusion_model_tensors()
SDVersion ModelLoader::get_sd_version() { SDVersion ModelLoader::get_sd_version() {
TensorStorage token_embedding_weight; TensorStorage token_embedding_weight;
bool is_flux = false;
bool is_sd3 = false;
for (auto& tensor_storage : tensor_storages) { for (auto& tensor_storage : tensor_storages) {
if (tensor_storage.name.find("model.diffusion_model.guidance_in.in_layer.weight") != std::string::npos) {
return VERSION_FLUX_DEV;
}
if (tensor_storage.name.find("model.diffusion_model.double_blocks.") != std::string::npos) { if (tensor_storage.name.find("model.diffusion_model.double_blocks.") != std::string::npos) {
is_flux = true; return VERSION_FLUX;
} }
if (tensor_storage.name.find("joint_blocks.0.x_block.attn2.ln_q.weight") != std::string::npos) { if (tensor_storage.name.find("model.diffusion_model.joint_blocks.") != std::string::npos) {
return VERSION_SD3_5_2B; return VERSION_SD3;
}
if (tensor_storage.name.find("joint_blocks.37.x_block.attn.ln_q.weight") != std::string::npos) {
return VERSION_SD3_5_8B;
}
if (tensor_storage.name.find("model.diffusion_model.joint_blocks.23.") != std::string::npos) {
is_sd3 = true;
} }
if (tensor_storage.name.find("conditioner.embedders.1") != std::string::npos) { if (tensor_storage.name.find("conditioner.embedders.1") != std::string::npos) {
return VERSION_SDXL; return VERSION_SDXL;
@ -1470,12 +1497,7 @@ SDVersion ModelLoader::get_sd_version() {
// break; // break;
} }
} }
if (is_flux) {
return VERSION_FLUX_SCHNELL;
}
if (is_sd3) {
return VERSION_SD3_2B;
}
if (token_embedding_weight.ne[0] == 768) { if (token_embedding_weight.ne[0] == 768) {
return VERSION_SD1; return VERSION_SD1;
} else if (token_embedding_weight.ne[0] == 1024) { } else if (token_embedding_weight.ne[0] == 1024) {
@ -1568,6 +1590,21 @@ ggml_type ModelLoader::get_vae_wtype() {
return GGML_TYPE_COUNT; return GGML_TYPE_COUNT;
} }
void ModelLoader::set_wtype_override(ggml_type wtype, std::string prefix) {
for (auto& pair : tensor_storages_types) {
if (prefix.size() < 1 || pair.first.substr(0, prefix.size()) == prefix) {
for (auto& tensor_storage : tensor_storages) {
if (tensor_storage.name == pair.first) {
if (tensor_should_be_converted(tensor_storage, wtype)) {
pair.second = wtype;
}
break;
}
}
}
}
}
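A caller-side sketch of the new override (the surrounding setup is an assumption; only the two methods declared in this patch are used):

    // Sketch: quantize only the diffusion model's weights to q8_0, leaving
    // the text encoders and VAE at the precision stored in the checkpoint.
    ModelLoader loader;
    if (loader.init_from_file("sd3.5_medium.safetensors")) {
        loader.set_wtype_override(GGML_TYPE_Q8_0, "model.diffusion_model.");
    }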
std::string ModelLoader::load_merges() { std::string ModelLoader::load_merges() {
std::string merges_utf8_str(reinterpret_cast<const char*>(merges_utf8_c_str), sizeof(merges_utf8_c_str)); std::string merges_utf8_str(reinterpret_cast<const char*>(merges_utf8_c_str), sizeof(merges_utf8_c_str));
return merges_utf8_str; return merges_utf8_str;


@ -22,14 +22,37 @@ enum SDVersion {
VERSION_SD2, VERSION_SD2,
VERSION_SDXL, VERSION_SDXL,
VERSION_SVD, VERSION_SVD,
VERSION_SD3_2B, VERSION_SD3,
VERSION_FLUX_DEV, VERSION_FLUX,
VERSION_FLUX_SCHNELL,
VERSION_SD3_5_8B,
VERSION_SD3_5_2B,
VERSION_COUNT, VERSION_COUNT,
}; };
static inline bool sd_version_is_flux(SDVersion version) {
if (version == VERSION_FLUX) {
return true;
}
return false;
}
static inline bool sd_version_is_sd3(SDVersion version) {
if (version == VERSION_SD3) {
return true;
}
return false;
}
static inline bool sd_version_is_dit(SDVersion version) {
if (sd_version_is_flux(version) || sd_version_is_sd3(version)) {
return true;
}
return false;
}
enum PMVersion {
PM_VERSION_1,
PM_VERSION_2,
};
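These predicates replace comparisons against the retired fine-grained enum values; a typical (hypothetical) call site reads:

    if (sd_version_is_dit(version)) {
        // SD3.x and Flux share the DiT-only paths, e.g. skip-layer guidance.
    }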
struct TensorStorage { struct TensorStorage {
std::string name; std::string name;
ggml_type type = GGML_TYPE_F32; ggml_type type = GGML_TYPE_F32;
@ -143,7 +166,7 @@ protected:
zip_t* zip, zip_t* zip,
std::string dir, std::string dir,
size_t file_index, size_t file_index,
const std::string& prefix); const std::string prefix);
bool init_from_gguf_file(const std::string& file_path, const std::string& prefix = ""); bool init_from_gguf_file(const std::string& file_path, const std::string& prefix = "");
bool init_from_safetensors_file(const std::string& file_path, const std::string& prefix = ""); bool init_from_safetensors_file(const std::string& file_path, const std::string& prefix = "");
@ -151,6 +174,8 @@ protected:
bool init_from_diffusers_file(const std::string& file_path, const std::string& prefix = ""); bool init_from_diffusers_file(const std::string& file_path, const std::string& prefix = "");
public: public:
std::map<std::string, enum ggml_type> tensor_storages_types;
bool init_from_file(const std::string& file_path, const std::string& prefix = ""); bool init_from_file(const std::string& file_path, const std::string& prefix = "");
bool has_diffusion_model_tensors(); bool has_diffusion_model_tensors();
SDVersion get_sd_version(); SDVersion get_sd_version();
@ -158,10 +183,12 @@ public:
ggml_type get_conditioner_wtype(); ggml_type get_conditioner_wtype();
ggml_type get_diffusion_model_wtype(); ggml_type get_diffusion_model_wtype();
ggml_type get_vae_wtype(); ggml_type get_vae_wtype();
void set_wtype_override(ggml_type wtype, std::string prefix = "");
bool load_tensors(on_new_tensor_cb_t on_new_tensor_cb, ggml_backend_t backend); bool load_tensors(on_new_tensor_cb_t on_new_tensor_cb, ggml_backend_t backend);
bool load_tensors(std::map<std::string, struct ggml_tensor*>& tensors, bool load_tensors(std::map<std::string, struct ggml_tensor*>& tensors,
ggml_backend_t backend, ggml_backend_t backend,
std::set<std::string> ignore_tensors = {}); std::set<std::string> ignore_tensors = {});
bool save_to_gguf_file(const std::string& file_path, ggml_type type); bool save_to_gguf_file(const std::string& file_path, ggml_type type);
bool tensor_should_be_converted(const TensorStorage& tensor_storage, ggml_type type); bool tensor_should_be_converted(const TensorStorage& tensor_storage, ggml_type type);
int64_t get_params_mem_size(ggml_backend_t backend, ggml_type type = GGML_TYPE_COUNT); int64_t get_params_mem_size(ggml_backend_t backend, ggml_type type = GGML_TYPE_COUNT);


@ -42,6 +42,370 @@ public:
} }
}; };
/*
class QFormerPerceiver(nn.Module):
def __init__(self, id_embeddings_dim, cross_attention_dim, num_tokens, embedding_dim=1024, use_residual=True, ratio=4):
super().__init__()
self.num_tokens = num_tokens
self.cross_attention_dim = cross_attention_dim
self.use_residual = use_residual
print(cross_attention_dim*num_tokens)
self.token_proj = nn.Sequential(
nn.Linear(id_embeddings_dim, id_embeddings_dim*ratio),
nn.GELU(),
nn.Linear(id_embeddings_dim*ratio, cross_attention_dim*num_tokens),
)
self.token_norm = nn.LayerNorm(cross_attention_dim)
self.perceiver_resampler = FacePerceiverResampler(
dim=cross_attention_dim,
depth=4,
dim_head=128,
heads=cross_attention_dim // 128,
embedding_dim=embedding_dim,
output_dim=cross_attention_dim,
ff_mult=4,
)
def forward(self, x, last_hidden_state):
x = self.token_proj(x)
x = x.reshape(-1, self.num_tokens, self.cross_attention_dim)
x = self.token_norm(x) # cls token
out = self.perceiver_resampler(x, last_hidden_state) # retrieve from patch tokens
if self.use_residual: # TODO: if use_residual is not true
out = x + 1.0 * out
return out
*/
struct PMFeedForward : public GGMLBlock {
// network hparams
int dim;
public:
PMFeedForward(int d, int multi = 4)
: dim(d) {
int inner_dim = dim * multi;
blocks["0"] = std::shared_ptr<GGMLBlock>(new LayerNorm(dim));
blocks["1"] = std::shared_ptr<GGMLBlock>(new Mlp(dim, inner_dim, dim, false));
}
struct ggml_tensor* forward(struct ggml_context* ctx,
struct ggml_tensor* x) {
auto norm = std::dynamic_pointer_cast<LayerNorm>(blocks["0"]);
auto ff = std::dynamic_pointer_cast<Mlp>(blocks["1"]);
x = norm->forward(ctx, x);
x = ff->forward(ctx, x);
return x;
}
};
struct PerceiverAttention : public GGMLBlock {
// network hparams
float scale; // = dim_head**-0.5
int dim_head; // = dim_head
int heads; // = heads
public:
PerceiverAttention(int dim, int dim_h = 64, int h = 8)
: scale(powf(dim_h, -0.5)), dim_head(dim_h), heads(h) {
int inner_dim = dim_head * heads;
blocks["norm1"] = std::shared_ptr<GGMLBlock>(new LayerNorm(dim));
blocks["norm2"] = std::shared_ptr<GGMLBlock>(new LayerNorm(dim));
blocks["to_q"] = std::shared_ptr<GGMLBlock>(new Linear(dim, inner_dim, false));
blocks["to_kv"] = std::shared_ptr<GGMLBlock>(new Linear(dim, inner_dim * 2, false));
blocks["to_out"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, dim, false));
}
struct ggml_tensor* reshape_tensor(struct ggml_context* ctx,
struct ggml_tensor* x,
int heads) {
int64_t ne[4];
for (int i = 0; i < 4; ++i)
ne[i] = x->ne[i];
// print_ggml_tensor(x, true, "PerceiverAttention reshape x 0: ");
// printf("heads = %d \n", heads);
// x = ggml_view_4d(ctx, x, x->ne[0], x->ne[1], heads, x->ne[2]/heads,
// x->nb[1], x->nb[2], x->nb[3], 0);
x = ggml_reshape_4d(ctx, x, x->ne[0] / heads, heads, x->ne[1], x->ne[2]);
// x = ggml_view_4d(ctx, x, x->ne[0]/heads, heads, x->ne[1], x->ne[2],
// x->nb[1], x->nb[2], x->nb[3], 0);
// x = ggml_cont(ctx, x);
x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3));
// print_ggml_tensor(x, true, "PerceiverAttention reshape x 1: ");
// x = ggml_reshape_4d(ctx, x, ne[0], heads, ne[1], ne[2]/heads);
return x;
}
std::vector<struct ggml_tensor*> chunk_half(struct ggml_context* ctx,
struct ggml_tensor* x) {
auto tlo = ggml_view_4d(ctx, x, x->ne[0] / 2, x->ne[1], x->ne[2], x->ne[3], x->nb[1], x->nb[2], x->nb[3], 0);
auto tli = ggml_view_4d(ctx, x, x->ne[0] / 2, x->ne[1], x->ne[2], x->ne[3], x->nb[1], x->nb[2], x->nb[3], x->nb[0] * x->ne[0] / 2);
return {ggml_cont(ctx, tlo),
ggml_cont(ctx, tli)};
}
struct ggml_tensor* forward(struct ggml_context* ctx,
struct ggml_tensor* x,
struct ggml_tensor* latents) {
// x (torch.Tensor): image features
// shape (b, n1, D)
// latent (torch.Tensor): latent features
// shape (b, n2, D)
int64_t ne[4];
for (int i = 0; i < 4; ++i)
ne[i] = latents->ne[i];
auto norm1 = std::dynamic_pointer_cast<LayerNorm>(blocks["norm1"]);
auto norm2 = std::dynamic_pointer_cast<LayerNorm>(blocks["norm2"]);
x = norm1->forward(ctx, x);
latents = norm2->forward(ctx, latents);
auto to_q = std::dynamic_pointer_cast<Linear>(blocks["to_q"]);
auto q = to_q->forward(ctx, latents);
auto kv_input = ggml_concat(ctx, x, latents, 1);
auto to_kv = std::dynamic_pointer_cast<Linear>(blocks["to_kv"]);
auto kv = to_kv->forward(ctx, kv_input);
auto k = ggml_view_4d(ctx, kv, kv->ne[0] / 2, kv->ne[1], kv->ne[2], kv->ne[3], kv->nb[1] / 2, kv->nb[2] / 2, kv->nb[3] / 2, 0);
auto v = ggml_view_4d(ctx, kv, kv->ne[0] / 2, kv->ne[1], kv->ne[2], kv->ne[3], kv->nb[1] / 2, kv->nb[2] / 2, kv->nb[3] / 2, kv->nb[0] * (kv->ne[0] / 2));
k = ggml_cont(ctx, k);
v = ggml_cont(ctx, v);
q = reshape_tensor(ctx, q, heads);
k = reshape_tensor(ctx, k, heads);
v = reshape_tensor(ctx, v, heads);
scale = 1.f / sqrt(sqrt((float)dim_head));
k = ggml_scale_inplace(ctx, k, scale);
q = ggml_scale_inplace(ctx, q, scale);
// auto weight = ggml_mul_mat(ctx, q, k);
auto weight = ggml_mul_mat(ctx, k, q); // NOTE order of mul is opposite to pytorch
// GGML's softmax() is equivalent to pytorch's softmax(x, dim=-1)
// in this case, dimension along which Softmax will be computed is the last dim
// in torch and the first dim in GGML, consistent with the convention that pytorch's
// last dimension (varying most rapidly) corresponds to GGML's first (varying most rapidly).
// weight = ggml_soft_max(ctx, weight);
weight = ggml_soft_max_inplace(ctx, weight);
v = ggml_cont(ctx, ggml_transpose(ctx, v));
// auto out = ggml_mul_mat(ctx, weight, v);
auto out = ggml_mul_mat(ctx, v, weight); // NOTE order of mul is opposite to pytorch
out = ggml_cont(ctx, ggml_permute(ctx, out, 0, 2, 1, 3));
out = ggml_reshape_3d(ctx, out, ne[0], ne[1], ggml_nelements(out) / (ne[0] * ne[1]));
auto to_out = std::dynamic_pointer_cast<Linear>(blocks["to_out"]);
out = to_out->forward(ctx, out);
return out;
}
};
struct FacePerceiverResampler : public GGMLBlock {
// network hparams
int depth;
public:
FacePerceiverResampler(int dim = 768,
int d = 4,
int dim_head = 64,
int heads = 16,
int embedding_dim = 1280,
int output_dim = 768,
int ff_mult = 4)
: depth(d) {
blocks["proj_in"] = std::shared_ptr<GGMLBlock>(new Linear(embedding_dim, dim, true));
blocks["proj_out"] = std::shared_ptr<GGMLBlock>(new Linear(dim, output_dim, true));
blocks["norm_out"] = std::shared_ptr<GGMLBlock>(new LayerNorm(output_dim));
for (int i = 0; i < depth; i++) {
std::string name = "layers." + std::to_string(i) + ".0";
blocks[name] = std::shared_ptr<GGMLBlock>(new PerceiverAttention(dim, dim_head, heads));
name = "layers." + std::to_string(i) + ".1";
blocks[name] = std::shared_ptr<GGMLBlock>(new PMFeedForward(dim, ff_mult));
}
}
struct ggml_tensor* forward(struct ggml_context* ctx,
struct ggml_tensor* latents,
struct ggml_tensor* x) {
// latents: learned query latents; x: image patch features (vision tower last_hidden_state)
auto proj_in = std::dynamic_pointer_cast<Linear>(blocks["proj_in"]);
auto proj_out = std::dynamic_pointer_cast<Linear>(blocks["proj_out"]);
auto norm_out = std::dynamic_pointer_cast<LayerNorm>(blocks["norm_out"]);
x = proj_in->forward(ctx, x);
for (int i = 0; i < depth; i++) {
std::string name = "layers." + std::to_string(i) + ".0";
auto attn = std::dynamic_pointer_cast<PerceiverAttention>(blocks[name]);
name = "layers." + std::to_string(i) + ".1";
auto ff = std::dynamic_pointer_cast<PMFeedForward>(blocks[name]);
auto t = attn->forward(ctx, x, latents);
latents = ggml_add(ctx, t, latents);
t = ff->forward(ctx, latents);
latents = ggml_add(ctx, t, latents);
}
latents = proj_out->forward(ctx, latents);
latents = norm_out->forward(ctx, latents);
return latents;
}
};
struct QFormerPerceiver : public GGMLBlock {
// network hparams
int num_tokens;
int cross_attention_dim;
bool use_residual;
public:
QFormerPerceiver(int id_embeddings_dim, int cross_attention_d, int num_t, int embedding_dim = 1024, bool use_r = true, int ratio = 4)
: num_tokens(num_t), cross_attention_dim(cross_attention_d), use_residual(use_r) {
blocks["token_proj"] = std::shared_ptr<GGMLBlock>(new Mlp(id_embeddings_dim,
id_embeddings_dim * ratio,
cross_attention_dim * num_tokens,
true));
blocks["token_norm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(cross_attention_d));
blocks["perceiver_resampler"] = std::shared_ptr<GGMLBlock>(new FacePerceiverResampler(
cross_attention_dim,
4,
128,
cross_attention_dim / 128,
embedding_dim,
cross_attention_dim,
4));
}
/*
def forward(self, x, last_hidden_state):
x = self.token_proj(x)
x = x.reshape(-1, self.num_tokens, self.cross_attention_dim)
x = self.token_norm(x) # cls token
out = self.perceiver_resampler(x, last_hidden_state) # retrieve from patch tokens
if self.use_residual: # TODO: if use_residual is not true
out = x + 1.0 * out
return out
*/
struct ggml_tensor* forward(struct ggml_context* ctx,
struct ggml_tensor* x,
struct ggml_tensor* last_hidden_state) {
// x: id embeds [N, id_embeddings_dim]; last_hidden_state: CLIP vision patch tokens
auto token_proj = std::dynamic_pointer_cast<Mlp>(blocks["token_proj"]);
auto token_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["token_norm"]);
auto perceiver_resampler = std::dynamic_pointer_cast<FacePerceiverResampler>(blocks["perceiver_resampler"]);
x = token_proj->forward(ctx, x);
int64_t nel = ggml_nelements(x);
x = ggml_reshape_3d(ctx, x, cross_attention_dim, num_tokens, nel / (cross_attention_dim * num_tokens));
x = token_norm->forward(ctx, x);
struct ggml_tensor* out = perceiver_resampler->forward(ctx, x, last_hidden_state);
if (use_residual)
out = ggml_add(ctx, x, out);
return out;
}
};
/*
class FacePerceiverResampler(torch.nn.Module):
def __init__(
self,
*,
dim=768,
depth=4,
dim_head=64,
heads=16,
embedding_dim=1280,
output_dim=768,
ff_mult=4,
):
super().__init__()
self.proj_in = torch.nn.Linear(embedding_dim, dim)
self.proj_out = torch.nn.Linear(dim, output_dim)
self.norm_out = torch.nn.LayerNorm(output_dim)
self.layers = torch.nn.ModuleList([])
for _ in range(depth):
self.layers.append(
torch.nn.ModuleList(
[
PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
FeedForward(dim=dim, mult=ff_mult),
]
)
)
def forward(self, latents, x):
x = self.proj_in(x)
for attn, ff in self.layers:
latents = attn(x, latents) + latents
latents = ff(latents) + latents
latents = self.proj_out(latents)
return self.norm_out(latents)
*/
/*
def FeedForward(dim, mult=4):
inner_dim = int(dim * mult)
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, inner_dim, bias=False),
nn.GELU(),
nn.Linear(inner_dim, dim, bias=False),
)
def reshape_tensor(x, heads):
bs, length, width = x.shape
# (bs, length, width) --> (bs, length, n_heads, dim_per_head)
x = x.view(bs, length, heads, -1)
# (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
x = x.transpose(1, 2)
# (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head)
x = x.reshape(bs, heads, length, -1)
return x
class PerceiverAttention(nn.Module):
def __init__(self, *, dim, dim_head=64, heads=8):
super().__init__()
self.scale = dim_head**-0.5
self.dim_head = dim_head
self.heads = heads
inner_dim = dim_head * heads
self.norm1 = nn.LayerNorm(dim)
self.norm2 = nn.LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
self.to_out = nn.Linear(inner_dim, dim, bias=False)
def forward(self, x, latents):
"""
Args:
x (torch.Tensor): image features
shape (b, n1, D)
latent (torch.Tensor): latent features
shape (b, n2, D)
"""
x = self.norm1(x)
latents = self.norm2(latents)
b, l, _ = latents.shape
q = self.to_q(latents)
kv_input = torch.cat((x, latents), dim=-2)
k, v = self.to_kv(kv_input).chunk(2, dim=-1)
q = reshape_tensor(q, self.heads)
k = reshape_tensor(k, self.heads)
v = reshape_tensor(v, self.heads)
# attention
scale = 1 / math.sqrt(math.sqrt(self.dim_head))
weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards
weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
out = weight @ v
out = out.permute(0, 2, 1, 3).reshape(b, l, -1)
return self.to_out(out)
*/
struct FuseModule : public GGMLBlock { struct FuseModule : public GGMLBlock {
// network hparams // network hparams
int embed_dim; int embed_dim;
@@ -61,12 +425,19 @@ public:
auto mlp2 = std::dynamic_pointer_cast<FuseBlock>(blocks["mlp2"]); auto mlp2 = std::dynamic_pointer_cast<FuseBlock>(blocks["mlp2"]);
auto layer_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["layer_norm"]); auto layer_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["layer_norm"]);
auto prompt_embeds0 = ggml_cont(ctx, ggml_permute(ctx, prompt_embeds, 2, 0, 1, 3)); // print_ggml_tensor(id_embeds, true, "Fuseblock id_embeds: ");
auto id_embeds0 = ggml_cont(ctx, ggml_permute(ctx, id_embeds, 2, 0, 1, 3)); // print_ggml_tensor(prompt_embeds, true, "Fuseblock prompt_embeds: ");
// concat is along dim 2
auto stacked_id_embeds = ggml_concat(ctx, prompt_embeds0, id_embeds0, 2);
stacked_id_embeds = ggml_cont(ctx, ggml_permute(ctx, stacked_id_embeds, 1, 2, 0, 3));
// auto prompt_embeds0 = ggml_cont(ctx, ggml_permute(ctx, prompt_embeds, 2, 0, 1, 3));
// auto id_embeds0 = ggml_cont(ctx, ggml_permute(ctx, id_embeds, 2, 0, 1, 3));
// print_ggml_tensor(id_embeds0, true, "Fuseblock id_embeds0: ");
// print_ggml_tensor(prompt_embeds0, true, "Fuseblock prompt_embeds0: ");
// concat is along dim 2
// auto stacked_id_embeds = ggml_concat(ctx, prompt_embeds0, id_embeds0, 2);
auto stacked_id_embeds = ggml_concat(ctx, prompt_embeds, id_embeds, 0);
// print_ggml_tensor(stacked_id_embeds, true, "Fuseblock stacked_id_embeds 0: ");
// stacked_id_embeds = ggml_cont(ctx, ggml_permute(ctx, stacked_id_embeds, 1, 2, 0, 3));
// print_ggml_tensor(stacked_id_embeds, true, "Fuseblock stacked_id_embeds 1: ");
// stacked_id_embeds = mlp1.forward(ctx, stacked_id_embeds); // stacked_id_embeds = mlp1.forward(ctx, stacked_id_embeds);
// stacked_id_embeds = ggml_add(ctx, stacked_id_embeds, prompt_embeds); // stacked_id_embeds = ggml_add(ctx, stacked_id_embeds, prompt_embeds);
// stacked_id_embeds = mlp2.forward(ctx, stacked_id_embeds); // stacked_id_embeds = mlp2.forward(ctx, stacked_id_embeds);
@@ -77,6 +448,8 @@ public:
stacked_id_embeds = mlp2->forward(ctx, stacked_id_embeds); stacked_id_embeds = mlp2->forward(ctx, stacked_id_embeds);
stacked_id_embeds = layer_norm->forward(ctx, stacked_id_embeds); stacked_id_embeds = layer_norm->forward(ctx, stacked_id_embeds);
// print_ggml_tensor(stacked_id_embeds, true, "Fuseblock stacked_id_embeds 1: ");
return stacked_id_embeds; return stacked_id_embeds;
} }
@@ -98,23 +471,31 @@ public:
// print_ggml_tensor(class_tokens_mask_pos, true, "class_tokens_mask_pos"); // print_ggml_tensor(class_tokens_mask_pos, true, "class_tokens_mask_pos");
struct ggml_tensor* image_token_embeds = ggml_get_rows(ctx, prompt_embeds, class_tokens_mask_pos); struct ggml_tensor* image_token_embeds = ggml_get_rows(ctx, prompt_embeds, class_tokens_mask_pos);
ggml_set_name(image_token_embeds, "image_token_embeds"); ggml_set_name(image_token_embeds, "image_token_embeds");
valid_id_embeds = ggml_reshape_2d(ctx, valid_id_embeds, valid_id_embeds->ne[0],
ggml_nelements(valid_id_embeds) / valid_id_embeds->ne[0]);
struct ggml_tensor* stacked_id_embeds = fuse_fn(ctx, image_token_embeds, valid_id_embeds); struct ggml_tensor* stacked_id_embeds = fuse_fn(ctx, image_token_embeds, valid_id_embeds);
stacked_id_embeds = ggml_cont(ctx, ggml_permute(ctx, stacked_id_embeds, 0, 2, 1, 3)); // stacked_id_embeds = ggml_cont(ctx, ggml_permute(ctx, stacked_id_embeds, 0, 2, 1, 3));
// print_ggml_tensor(stacked_id_embeds, true, "AA stacked_id_embeds");
// print_ggml_tensor(left, true, "AA left");
// print_ggml_tensor(right, true, "AA right");
if (left && right) { if (left && right) {
stacked_id_embeds = ggml_concat(ctx, left, stacked_id_embeds, 2); stacked_id_embeds = ggml_concat(ctx, left, stacked_id_embeds, 1);
stacked_id_embeds = ggml_concat(ctx, stacked_id_embeds, right, 2); stacked_id_embeds = ggml_concat(ctx, stacked_id_embeds, right, 1);
} else if (left) { } else if (left) {
stacked_id_embeds = ggml_concat(ctx, left, stacked_id_embeds, 2); stacked_id_embeds = ggml_concat(ctx, left, stacked_id_embeds, 1);
} else if (right) { } else if (right) {
stacked_id_embeds = ggml_concat(ctx, stacked_id_embeds, right, 2); stacked_id_embeds = ggml_concat(ctx, stacked_id_embeds, right, 1);
} }
stacked_id_embeds = ggml_cont(ctx, ggml_permute(ctx, stacked_id_embeds, 0, 2, 1, 3)); // print_ggml_tensor(stacked_id_embeds, true, "BB stacked_id_embeds");
// stacked_id_embeds = ggml_cont(ctx, ggml_permute(ctx, stacked_id_embeds, 0, 2, 1, 3));
// print_ggml_tensor(stacked_id_embeds, true, "CC stacked_id_embeds");
class_tokens_mask = ggml_cont(ctx, ggml_transpose(ctx, class_tokens_mask)); class_tokens_mask = ggml_cont(ctx, ggml_transpose(ctx, class_tokens_mask));
class_tokens_mask = ggml_repeat(ctx, class_tokens_mask, prompt_embeds); class_tokens_mask = ggml_repeat(ctx, class_tokens_mask, prompt_embeds);
prompt_embeds = ggml_mul(ctx, prompt_embeds, class_tokens_mask); prompt_embeds = ggml_mul(ctx, prompt_embeds, class_tokens_mask);
struct ggml_tensor* updated_prompt_embeds = ggml_add(ctx, prompt_embeds, stacked_id_embeds); struct ggml_tensor* updated_prompt_embeds = ggml_add(ctx, prompt_embeds, stacked_id_embeds);
ggml_set_name(updated_prompt_embeds, "updated_prompt_embeds"); ggml_set_name(updated_prompt_embeds, "updated_prompt_embeds");
// print_ggml_tensor(updated_prompt_embeds, true, "updated_prompt_embeds: ");
return updated_prompt_embeds; return updated_prompt_embeds;
} }
}; };
@@ -159,10 +540,77 @@ struct PhotoMakerIDEncoderBlock : public CLIPVisionModelProjection {
} }
}; };
struct PhotoMakerIDEncoder_CLIPInsightfaceExtendtokenBlock : public CLIPVisionModelProjection {
int cross_attention_dim;
int num_tokens;
PhotoMakerIDEncoder_CLIPInsightfaceExtendtokenBlock(int id_embeddings_dim = 512)
: CLIPVisionModelProjection(OPENAI_CLIP_VIT_L_14),
cross_attention_dim(2048),
num_tokens(2) {
blocks["visual_projection_2"] = std::shared_ptr<GGMLBlock>(new Linear(1024, 1280, false));
blocks["fuse_module"] = std::shared_ptr<GGMLBlock>(new FuseModule(2048));
/*
cross_attention_dim = 2048
# projection
self.num_tokens = 2
self.cross_attention_dim = cross_attention_dim
self.qformer_perceiver = QFormerPerceiver(
id_embeddings_dim,
cross_attention_dim,
self.num_tokens,
)*/
blocks["qformer_perceiver"] = std::shared_ptr<GGMLBlock>(new QFormerPerceiver(id_embeddings_dim,
cross_attention_dim,
num_tokens));
}
/*
def forward(self, id_pixel_values, prompt_embeds, class_tokens_mask, id_embeds):
b, num_inputs, c, h, w = id_pixel_values.shape
id_pixel_values = id_pixel_values.view(b * num_inputs, c, h, w)
last_hidden_state = self.vision_model(id_pixel_values)[0]
id_embeds = id_embeds.view(b * num_inputs, -1)
id_embeds = self.qformer_perceiver(id_embeds, last_hidden_state)
id_embeds = id_embeds.view(b, num_inputs, self.num_tokens, -1)
updated_prompt_embeds = self.fuse_module(prompt_embeds, id_embeds, class_tokens_mask)
*/
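// a sketch of the PhotoMaker v2 flow below: the CLIP vision tower encodes the
// id images, the QFormerPerceiver fuses those patch tokens with the InsightFace
// id embeds, and the FuseModule splices the result into the prompt embeddings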
struct ggml_tensor* forward(struct ggml_context* ctx,
struct ggml_tensor* id_pixel_values,
struct ggml_tensor* prompt_embeds,
struct ggml_tensor* class_tokens_mask,
struct ggml_tensor* class_tokens_mask_pos,
struct ggml_tensor* id_embeds,
struct ggml_tensor* left,
struct ggml_tensor* right) {
// id_pixel_values: [N, channels, h, w] stacked id images
auto vision_model = std::dynamic_pointer_cast<CLIPVisionModel>(blocks["vision_model"]);
auto fuse_module = std::dynamic_pointer_cast<FuseModule>(blocks["fuse_module"]);
auto qformer_perceiver = std::dynamic_pointer_cast<QFormerPerceiver>(blocks["qformer_perceiver"]);
// struct ggml_tensor* last_hidden_state = vision_model->forward(ctx, id_pixel_values); // [N, hidden_size]
struct ggml_tensor* last_hidden_state = vision_model->forward(ctx, id_pixel_values, false); // [N, hidden_size]
id_embeds = qformer_perceiver->forward(ctx, id_embeds, last_hidden_state);
struct ggml_tensor* updated_prompt_embeds = fuse_module->forward(ctx,
prompt_embeds,
id_embeds,
class_tokens_mask,
class_tokens_mask_pos,
left, right);
return updated_prompt_embeds;
}
};
struct PhotoMakerIDEncoder : public GGMLRunner { struct PhotoMakerIDEncoder : public GGMLRunner {
public: public:
SDVersion version = VERSION_SDXL; SDVersion version = VERSION_SDXL;
PMVersion pm_version = PM_VERSION_1;
PhotoMakerIDEncoderBlock id_encoder; PhotoMakerIDEncoderBlock id_encoder;
PhotoMakerIDEncoder_CLIPInsightfaceExtendtokenBlock id_encoder2;
float style_strength; float style_strength;
std::vector<float> ctm; std::vector<float> ctm;
@@ -175,25 +623,38 @@ public:
std::vector<float> zeros_right; std::vector<float> zeros_right;
public: public:
PhotoMakerIDEncoder(ggml_backend_t backend, ggml_type wtype, SDVersion version = VERSION_SDXL, float sty = 20.f) PhotoMakerIDEncoder(ggml_backend_t backend, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix, SDVersion version = VERSION_SDXL, PMVersion pm_v = PM_VERSION_1, float sty = 20.f)
: GGMLRunner(backend, wtype), : GGMLRunner(backend),
version(version), version(version),
pm_version(pm_v),
style_strength(sty) { style_strength(sty) {
id_encoder.init(params_ctx, wtype); if (pm_version == PM_VERSION_1) {
id_encoder.init(params_ctx, tensor_types, prefix);
} else if (pm_version == PM_VERSION_2) {
id_encoder2.init(params_ctx, tensor_types, prefix);
}
} }
std::string get_desc() { std::string get_desc() {
return "pmid"; return "pmid";
} }
PMVersion get_version() const {
return pm_version;
}
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) { void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
if (pm_version == PM_VERSION_1)
id_encoder.get_param_tensors(tensors, prefix); id_encoder.get_param_tensors(tensors, prefix);
else if (pm_version == PM_VERSION_2)
id_encoder2.get_param_tensors(tensors, prefix);
} }
struct ggml_cgraph* build_graph( // struct ggml_allocr* allocr, struct ggml_cgraph* build_graph( // struct ggml_allocr* allocr,
struct ggml_tensor* id_pixel_values, struct ggml_tensor* id_pixel_values,
struct ggml_tensor* prompt_embeds, struct ggml_tensor* prompt_embeds,
std::vector<bool>& class_tokens_mask) { std::vector<bool>& class_tokens_mask,
struct ggml_tensor* id_embeds) {
ctm.clear(); ctm.clear();
ctmf16.clear(); ctmf16.clear();
ctmpos.clear(); ctmpos.clear();
@@ -214,25 +675,32 @@ public:
struct ggml_tensor* id_pixel_values_d = to_backend(id_pixel_values); struct ggml_tensor* id_pixel_values_d = to_backend(id_pixel_values);
struct ggml_tensor* prompt_embeds_d = to_backend(prompt_embeds); struct ggml_tensor* prompt_embeds_d = to_backend(prompt_embeds);
struct ggml_tensor* id_embeds_d = to_backend(id_embeds);
struct ggml_tensor* left = NULL; struct ggml_tensor* left = NULL;
struct ggml_tensor* right = NULL; struct ggml_tensor* right = NULL;
for (int i = 0; i < class_tokens_mask.size(); i++) { for (int i = 0; i < class_tokens_mask.size(); i++) {
if (class_tokens_mask[i]) { if (class_tokens_mask[i]) {
// printf(" 1,");
ctm.push_back(0.f); // here use 0.f instead of 1.f to make a scale mask ctm.push_back(0.f); // here use 0.f instead of 1.f to make a scale mask
ctmf16.push_back(ggml_fp32_to_fp16(0.f)); // here use 0.f instead of 1.f to make a scale mask ctmf16.push_back(ggml_fp32_to_fp16(0.f)); // here use 0.f instead of 1.f to make a scale mask
ctmpos.push_back(i); ctmpos.push_back(i);
} else { } else {
// printf(" 0,");
ctm.push_back(1.f); // here use 1.f instead of 0.f to make a scale mask ctm.push_back(1.f); // here use 1.f instead of 0.f to make a scale mask
ctmf16.push_back(ggml_fp32_to_fp16(1.f)); // here use 1.f instead of 0.f to make a scale mask ctmf16.push_back(ggml_fp32_to_fp16(1.f)); // here use 1.f instead of 0.f to make a scale mask
} }
} }
// printf("\n");
if (ctmpos[0] > 0) { if (ctmpos[0] > 0) {
left = ggml_new_tensor_3d(ctx0, type, hidden_size, 1, ctmpos[0]); // left = ggml_new_tensor_3d(ctx0, type, hidden_size, 1, ctmpos[0]);
left = ggml_new_tensor_3d(ctx0, type, hidden_size, ctmpos[0], 1);
} }
if (ctmpos[ctmpos.size() - 1] < seq_length - 1) { if (ctmpos[ctmpos.size() - 1] < seq_length - 1) {
// right = ggml_new_tensor_3d(ctx0, type,
// hidden_size, 1, seq_length - ctmpos[ctmpos.size() - 1] - 1);
right = ggml_new_tensor_3d(ctx0, type, right = ggml_new_tensor_3d(ctx0, type,
hidden_size, 1, seq_length - ctmpos[ctmpos.size() - 1] - 1); hidden_size, seq_length - ctmpos[ctmpos.size() - 1] - 1, 1);
} }
struct ggml_tensor* class_tokens_mask_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ctmpos.size()); struct ggml_tensor* class_tokens_mask_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ctmpos.size());
@@ -265,12 +733,23 @@ public:
} }
} }
} }
struct ggml_tensor* updated_prompt_embeds = id_encoder.forward(ctx0, struct ggml_tensor* updated_prompt_embeds = NULL;
if (pm_version == PM_VERSION_1)
updated_prompt_embeds = id_encoder.forward(ctx0,
id_pixel_values_d, id_pixel_values_d,
prompt_embeds_d, prompt_embeds_d,
class_tokens_mask_d, class_tokens_mask_d,
class_tokens_mask_pos, class_tokens_mask_pos,
left, right); left, right);
else if (pm_version == PM_VERSION_2)
updated_prompt_embeds = id_encoder2.forward(ctx0,
id_pixel_values_d,
prompt_embeds_d,
class_tokens_mask_d,
class_tokens_mask_pos,
id_embeds_d,
left, right);
ggml_build_forward_expand(gf, updated_prompt_embeds); ggml_build_forward_expand(gf, updated_prompt_embeds);
return gf; return gf;
@@ -279,12 +758,13 @@ public:
void compute(const int n_threads, void compute(const int n_threads,
struct ggml_tensor* id_pixel_values, struct ggml_tensor* id_pixel_values,
struct ggml_tensor* prompt_embeds, struct ggml_tensor* prompt_embeds,
struct ggml_tensor* id_embeds,
std::vector<bool>& class_tokens_mask, std::vector<bool>& class_tokens_mask,
struct ggml_tensor** updated_prompt_embeds, struct ggml_tensor** updated_prompt_embeds,
ggml_context* output_ctx) { ggml_context* output_ctx) {
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> struct ggml_cgraph* {
// return build_graph(compute_allocr, id_pixel_values, prompt_embeds, class_tokens_mask); // return build_graph(compute_allocr, id_pixel_values, prompt_embeds, class_tokens_mask);
return build_graph(id_pixel_values, prompt_embeds, class_tokens_mask); return build_graph(id_pixel_values, prompt_embeds, class_tokens_mask, id_embeds);
}; };
// GGMLRunner::compute(get_graph, n_threads, updated_prompt_embeds); // GGMLRunner::compute(get_graph, n_threads, updated_prompt_embeds);
@@ -292,4 +772,74 @@ public:
} }
}; };
struct PhotoMakerIDEmbed : public GGMLRunner {
std::map<std::string, struct ggml_tensor*> tensors;
std::string file_path;
ModelLoader* model_loader;
bool load_failed = false;
bool applied = false;
PhotoMakerIDEmbed(ggml_backend_t backend,
ModelLoader* ml,
const std::string& file_path = "",
const std::string& prefix = "")
: GGMLRunner(backend), file_path(file_path), model_loader(ml) {
if (!model_loader->init_from_file(file_path, prefix)) {
load_failed = true;
}
}
std::string get_desc() {
return "id_embeds";
}
bool load_from_file(bool filter_tensor = false) {
LOG_INFO("loading PhotoMaker ID Embeds from '%s'", file_path.c_str());
if (load_failed) {
LOG_ERROR("init photomaker id embed from file failed: '%s'", file_path.c_str());
return false;
}
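// two-pass load: the dry run only creates destination tensors in params_ctx;
// once the params buffer is allocated, the second pass reads the data into them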
bool dry_run = true;
auto on_new_tensor_cb = [&](const TensorStorage& tensor_storage, ggml_tensor** dst_tensor) -> bool {
const std::string& name = tensor_storage.name;
if (filter_tensor && !contains(name, "pmid.id_embeds")) {
// LOG_INFO("skipping LoRA tesnor '%s'", name.c_str());
return true;
}
if (dry_run) {
struct ggml_tensor* real = ggml_new_tensor(params_ctx,
tensor_storage.type,
tensor_storage.n_dims,
tensor_storage.ne);
tensors[name] = real;
} else {
auto real = tensors[name];
*dst_tensor = real;
}
return true;
};
model_loader->load_tensors(on_new_tensor_cb, backend);
alloc_params_buffer();
dry_run = false;
model_loader->load_tensors(on_new_tensor_cb, backend);
LOG_DEBUG("finished loading PhotoMaker ID Embeds ");
return true;
}
struct ggml_tensor* get() {
std::map<std::string, struct ggml_tensor*>::iterator pos;
pos = tensors.find("pmid.id_embeds");
if (pos != tensors.end())
return pos->second;
return NULL;
}
};
#endif // __PMI_HPP__ #endif // __PMI_HPP__

View file

@@ -92,9 +92,15 @@ struct SDParams {
bool normalize_input = false; bool normalize_input = false;
bool clip_on_cpu = false; bool clip_on_cpu = false;
bool vae_on_cpu = false; bool vae_on_cpu = false;
bool diffusion_flash_attn = false;
bool canny_preprocess = false; bool canny_preprocess = false;
bool color = false; bool color = false;
int upscale_repeats = 1; int upscale_repeats = 1;
std::vector<int> skip_layers = {7, 8, 9};
float slg_scale = 0.;
float skip_layer_start = 0.01;
float skip_layer_end = 0.2;
}; };
//shared //shared
@@ -255,7 +261,8 @@ bool sdtype_load_model(const sd_load_model_inputs inputs) {
sd_params->schedule, sd_params->schedule,
sd_params->clip_on_cpu, sd_params->clip_on_cpu,
sd_params->control_net_cpu, sd_params->control_net_cpu,
sd_params->vae_on_cpu); sd_params->vae_on_cpu,
sd_params->diffusion_flash_attn);
if (sd_ctx == NULL) { if (sd_ctx == NULL) {
printf("\nError: KCPP SD Failed to create context!\nIf using Flux/SD3.5, make sure you have ALL files required (e.g. VAE, T5, Clip...) or baked in!\n"); printf("\nError: KCPP SD Failed to create context!\nIf using Flux/SD3.5, make sure you have ALL files required (e.g. VAE, T5, Clip...) or baked in!\n");
@@ -326,7 +333,7 @@ sd_generation_outputs sdtype_generate(const sd_generation_inputs inputs)
//ensure unsupported dimensions are fixed //ensure unsupported dimensions are fixed
int biggestdim = (sd_params->width>sd_params->height?sd_params->width:sd_params->height); int biggestdim = (sd_params->width>sd_params->height?sd_params->width:sd_params->height);
auto loadedsdver = get_loaded_sd_version(sd_ctx); auto loadedsdver = get_loaded_sd_version(sd_ctx);
if(loadedsdver==SDVersion::VERSION_FLUX_DEV || loadedsdver==SDVersion::VERSION_FLUX_SCHNELL) if(loadedsdver==SDVersion::VERSION_FLUX)
{ {
sd_params->cfg_scale = 1; sd_params->cfg_scale = 1;
if(sampler=="euler a"||sampler=="k_euler_a"||sampler=="euler_a") if(sampler=="euler a"||sampler=="k_euler_a"||sampler=="euler_a")
@@ -432,7 +439,12 @@ sd_generation_outputs sdtype_generate(const sd_generation_inputs inputs)
sd_params->control_strength, sd_params->control_strength,
sd_params->style_ratio, sd_params->style_ratio,
sd_params->normalize_input, sd_params->normalize_input,
sd_params->input_id_images_path.c_str()); sd_params->input_id_images_path.c_str(),
sd_params->skip_layers.data(),
sd_params->skip_layers.size(),
sd_params->slg_scale,
sd_params->skip_layer_start,
sd_params->skip_layer_end);
} else { } else {
if (sd_params->width <= 0 || sd_params->width % 64 != 0 || sd_params->height <= 0 || sd_params->height % 64 != 0) { if (sd_params->width <= 0 || sd_params->width % 64 != 0 || sd_params->height <= 0 || sd_params->height % 64 != 0) {
@@ -514,7 +526,12 @@ sd_generation_outputs sdtype_generate(const sd_generation_inputs inputs)
sd_params->control_strength, sd_params->control_strength,
sd_params->style_ratio, sd_params->style_ratio,
sd_params->normalize_input, sd_params->normalize_input,
sd_params->input_id_images_path.c_str()); sd_params->input_id_images_path.c_str(),
sd_params->skip_layers.data(),
sd_params->skip_layers.size(),
sd_params->slg_scale,
sd_params->skip_layer_start,
sd_params->skip_layer_end);
} }
if (results == NULL) { if (results == NULL) {

View file

@@ -28,11 +28,8 @@ const char* model_version_to_str[] = {
"SD 2.x", "SD 2.x",
"SDXL", "SDXL",
"SVD", "SVD",
"SD3 2B", "SD3.x",
"Flux Dev", "Flux"};
"Flux Schnell",
"SD3.5 8B",
"SD3.5 2B"};
const char* sampling_methods_str[] = { const char* sampling_methods_str[] = {
"Euler A", "Euler A",
@@ -93,6 +90,7 @@ public:
std::shared_ptr<ControlNet> control_net; std::shared_ptr<ControlNet> control_net;
std::shared_ptr<PhotoMakerIDEncoder> pmid_model; std::shared_ptr<PhotoMakerIDEncoder> pmid_model;
std::shared_ptr<LoraModel> pmid_lora; std::shared_ptr<LoraModel> pmid_lora;
std::shared_ptr<PhotoMakerIDEmbed> pmid_id_embeds;
std::string taesd_path; std::string taesd_path;
bool use_tiny_autoencoder = false; bool use_tiny_autoencoder = false;
@@ -153,7 +151,8 @@ public:
schedule_t schedule, schedule_t schedule,
bool clip_on_cpu, bool clip_on_cpu,
bool control_net_cpu, bool control_net_cpu,
bool vae_on_cpu) { bool vae_on_cpu,
bool diffusion_flash_attn) {
use_tiny_autoencoder = taesd_path.size() > 0; use_tiny_autoencoder = taesd_path.size() > 0;
std::string taesd_path_fixed = taesd_path; std::string taesd_path_fixed = taesd_path;
#ifdef SD_USE_CUBLAS #ifdef SD_USE_CUBLAS
@@ -166,7 +165,12 @@ public:
#endif #endif
#ifdef SD_USE_VULKAN #ifdef SD_USE_VULKAN
LOG_DEBUG("Using Vulkan backend"); LOG_DEBUG("Using Vulkan backend");
backend = ggml_backend_vk_init(0); for (int device = 0; device < ggml_backend_vk_get_device_count(); ++device) {
backend = ggml_backend_vk_init(device);
}
if (!backend) {
LOG_WARN("Failed to initialize Vulkan backend");
}
#endif #endif
#ifdef SD_USE_SYCL #ifdef SD_USE_SYCL
LOG_DEBUG("Using SYCL backend"); LOG_DEBUG("Using SYCL backend");
@@ -177,13 +181,7 @@ public:
LOG_DEBUG("Using CPU backend"); LOG_DEBUG("Using CPU backend");
backend = ggml_backend_cpu_init(); backend = ggml_backend_cpu_init();
} }
#ifdef SD_USE_FLASH_ATTENTION
#if defined(SD_USE_CUBLAS) || defined(SD_USE_METAL) || defined(SD_USE_SYCL) || defined(SD_USE_VULKAN)
LOG_WARN("Flash Attention not supported with GPU Backend");
#else
LOG_INFO("Flash Attention enabled");
#endif
#endif
ModelLoader model_loader; ModelLoader model_loader;
vae_tiling = vae_tiling_; vae_tiling = vae_tiling_;
@@ -285,16 +283,18 @@ public:
conditioner_wtype = wtype; conditioner_wtype = wtype;
diffusion_model_wtype = wtype; diffusion_model_wtype = wtype;
vae_wtype = wtype; vae_wtype = wtype;
model_loader.set_wtype_override(wtype);
} }
if (version == VERSION_SDXL) { if (version == VERSION_SDXL) {
vae_wtype = GGML_TYPE_F32; vae_wtype = GGML_TYPE_F32;
model_loader.set_wtype_override(GGML_TYPE_F32, "vae.");
} }
LOG_INFO("Weight type: %s", ggml_type_name(model_wtype)); LOG_INFO("Weight type: %s", model_wtype != SD_TYPE_COUNT ? ggml_type_name(model_wtype) : "??");
LOG_INFO("Conditioner weight type: %s", ggml_type_name(conditioner_wtype)); LOG_INFO("Conditioner weight type: %s", conditioner_wtype != SD_TYPE_COUNT ? ggml_type_name(conditioner_wtype) : "??");
LOG_INFO("Diffusion model weight type: %s", ggml_type_name(diffusion_model_wtype)); LOG_INFO("Diffusion model weight type: %s", diffusion_model_wtype != SD_TYPE_COUNT ? ggml_type_name(diffusion_model_wtype) : "??");
LOG_INFO("VAE weight type: %s", ggml_type_name(vae_wtype)); LOG_INFO("VAE weight type: %s", vae_wtype != SD_TYPE_COUNT ? ggml_type_name(vae_wtype) : "??");
LOG_DEBUG("ggml tensor size = %d bytes", (int)sizeof(ggml_tensor)); LOG_DEBUG("ggml tensor size = %d bytes", (int)sizeof(ggml_tensor));
@@ -307,30 +307,30 @@ public:
"try specifying SDXL VAE FP16 Fix with the --vae parameter. " "try specifying SDXL VAE FP16 Fix with the --vae parameter. "
"You can find it here: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/blob/main/sdxl_vae.safetensors"); "You can find it here: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/blob/main/sdxl_vae.safetensors");
} }
} else if (version == VERSION_SD3_2B || version == VERSION_SD3_5_8B || version == VERSION_SD3_5_2B) { } else if (sd_version_is_sd3(version)) {
scale_factor = 1.5305f; scale_factor = 1.5305f;
} else if (version == VERSION_FLUX_DEV || version == VERSION_FLUX_SCHNELL) { } else if (sd_version_is_flux(version)) {
scale_factor = 0.3611; scale_factor = 0.3611;
// TODO: shift_factor // TODO: shift_factor
} }
if (version == VERSION_SVD) { if (version == VERSION_SVD) {
clip_vision = std::make_shared<FrozenCLIPVisionEmbedder>(backend, conditioner_wtype); clip_vision = std::make_shared<FrozenCLIPVisionEmbedder>(backend, model_loader.tensor_storages_types);
clip_vision->alloc_params_buffer(); clip_vision->alloc_params_buffer();
clip_vision->get_param_tensors(tensors); clip_vision->get_param_tensors(tensors);
diffusion_model = std::make_shared<UNetModel>(backend, diffusion_model_wtype, version); diffusion_model = std::make_shared<UNetModel>(backend, model_loader.tensor_storages_types, version);
diffusion_model->alloc_params_buffer(); diffusion_model->alloc_params_buffer();
diffusion_model->get_param_tensors(tensors); diffusion_model->get_param_tensors(tensors);
first_stage_model = std::make_shared<AutoEncoderKL>(backend, vae_wtype, vae_decode_only, true, version); first_stage_model = std::make_shared<AutoEncoderKL>(backend, model_loader.tensor_storages_types, "first_stage_model", vae_decode_only, true, version);
LOG_DEBUG("vae_decode_only %d", vae_decode_only); LOG_DEBUG("vae_decode_only %d", vae_decode_only);
first_stage_model->alloc_params_buffer(); first_stage_model->alloc_params_buffer();
first_stage_model->get_param_tensors(tensors, "first_stage_model"); first_stage_model->get_param_tensors(tensors, "first_stage_model");
} else { } else {
clip_backend = backend; clip_backend = backend;
bool use_t5xxl = false; bool use_t5xxl = false;
if (version == VERSION_SD3_2B || version == VERSION_SD3_5_8B || version == VERSION_SD3_5_2B || version == VERSION_FLUX_DEV || version == VERSION_FLUX_SCHNELL) { if (sd_version_is_dit(version)) {
use_t5xxl = true; use_t5xxl = true;
} }
if (!ggml_backend_is_cpu(backend) && use_t5xxl && conditioner_wtype != GGML_TYPE_F32) { if (!ggml_backend_is_cpu(backend) && use_t5xxl && conditioner_wtype != GGML_TYPE_F32) {
@@ -341,16 +341,27 @@ public:
LOG_INFO("CLIP: Using CPU backend"); LOG_INFO("CLIP: Using CPU backend");
clip_backend = ggml_backend_cpu_init(); clip_backend = ggml_backend_cpu_init();
} }
if (version == VERSION_SD3_2B || version == VERSION_SD3_5_8B || version == VERSION_SD3_5_2B) { if (diffusion_flash_attn) {
cond_stage_model = std::make_shared<SD3CLIPEmbedder>(clip_backend, conditioner_wtype); LOG_INFO("Using flash attention in the diffusion model");
diffusion_model = std::make_shared<MMDiTModel>(backend, diffusion_model_wtype, version);
} else if (version == VERSION_FLUX_DEV || version == VERSION_FLUX_SCHNELL) {
cond_stage_model = std::make_shared<FluxCLIPEmbedder>(clip_backend, conditioner_wtype);
diffusion_model = std::make_shared<FluxModel>(backend, diffusion_model_wtype, version);
} else {
cond_stage_model = std::make_shared<FrozenCLIPEmbedderWithCustomWords>(clip_backend, conditioner_wtype, embeddings_path, version);
diffusion_model = std::make_shared<UNetModel>(backend, diffusion_model_wtype, version);
} }
if (sd_version_is_sd3(version)) {
if (diffusion_flash_attn) {
LOG_WARN("flash attention in this diffusion model is currently unsupported!");
}
cond_stage_model = std::make_shared<SD3CLIPEmbedder>(clip_backend, model_loader.tensor_storages_types);
diffusion_model = std::make_shared<MMDiTModel>(backend, model_loader.tensor_storages_types);
} else if (sd_version_is_flux(version)) {
cond_stage_model = std::make_shared<FluxCLIPEmbedder>(clip_backend, model_loader.tensor_storages_types);
diffusion_model = std::make_shared<FluxModel>(backend, model_loader.tensor_storages_types, diffusion_flash_attn);
} else {
if (id_embeddings_path.find("v2") != std::string::npos) {
cond_stage_model = std::make_shared<FrozenCLIPEmbedderWithCustomWords>(clip_backend, model_loader.tensor_storages_types, embeddings_path, version, PM_VERSION_2);
} else {
cond_stage_model = std::make_shared<FrozenCLIPEmbedderWithCustomWords>(clip_backend, model_loader.tensor_storages_types, embeddings_path, version);
}
diffusion_model = std::make_shared<UNetModel>(backend, model_loader.tensor_storages_types, version, diffusion_flash_attn);
}
cond_stage_model->alloc_params_buffer(); cond_stage_model->alloc_params_buffer();
cond_stage_model->get_param_tensors(tensors); cond_stage_model->get_param_tensors(tensors);
@@ -364,11 +375,11 @@ public:
} else { } else {
vae_backend = backend; vae_backend = backend;
} }
first_stage_model = std::make_shared<AutoEncoderKL>(vae_backend, vae_wtype, vae_decode_only, false, version); first_stage_model = std::make_shared<AutoEncoderKL>(vae_backend, model_loader.tensor_storages_types, "first_stage_model", vae_decode_only, false, version);
first_stage_model->alloc_params_buffer(); first_stage_model->alloc_params_buffer();
first_stage_model->get_param_tensors(tensors, "first_stage_model"); first_stage_model->get_param_tensors(tensors, "first_stage_model");
} else { } else {
tae_first_stage = std::make_shared<TinyAutoEncoder>(backend, vae_wtype, vae_decode_only); tae_first_stage = std::make_shared<TinyAutoEncoder>(backend, model_loader.tensor_storages_types, "decoder.layers", vae_decode_only);
} }
// first_stage_model->get_param_tensors(tensors, "first_stage_model."); // first_stage_model->get_param_tensors(tensors, "first_stage_model.");
@@ -380,12 +391,17 @@ public:
} else { } else {
controlnet_backend = backend; controlnet_backend = backend;
} }
control_net = std::make_shared<ControlNet>(controlnet_backend, diffusion_model_wtype, version); control_net = std::make_shared<ControlNet>(controlnet_backend, model_loader.tensor_storages_types, version);
} }
pmid_model = std::make_shared<PhotoMakerIDEncoder>(clip_backend, model_wtype, version); if (id_embeddings_path.find("v2") != std::string::npos) {
pmid_model = std::make_shared<PhotoMakerIDEncoder>(backend, model_loader.tensor_storages_types, "pmid", version, PM_VERSION_2);
LOG_INFO("using PhotoMaker Version 2");
} else {
pmid_model = std::make_shared<PhotoMakerIDEncoder>(backend, model_loader.tensor_storages_types, "pmid", version);
}
if (id_embeddings_path.size() > 0) { if (id_embeddings_path.size() > 0) {
pmid_lora = std::make_shared<LoraModel>(backend, model_wtype, id_embeddings_path, ""); pmid_lora = std::make_shared<LoraModel>(backend, id_embeddings_path, "");
if (!pmid_lora->load_from_file(true)) { if (!pmid_lora->load_from_file(true)) {
LOG_WARN("load photomaker lora tensors from %s failed", id_embeddings_path.c_str()); LOG_WARN("load photomaker lora tensors from %s failed", id_embeddings_path.c_str());
return false; return false;
@@ -402,14 +418,8 @@ public:
LOG_ERROR(" pmid model params buffer allocation failed"); LOG_ERROR(" pmid model params buffer allocation failed");
return false; return false;
} }
// LOG_INFO("pmid param memory buffer size = %.2fMB ",
// pmid_model->params_buffer_size / 1024.0 / 1024.0);
pmid_model->get_param_tensors(tensors, "pmid"); pmid_model->get_param_tensors(tensors, "pmid");
} }
// if(stacked_id){
// pmid_model.init_params(GGML_TYPE_F32);
// pmid_model.map_by_name(tensors, "pmid.");
// }
} }
struct ggml_init_params params; struct ggml_init_params params;
@@ -539,14 +549,17 @@ public:
is_using_v_parameterization = true; is_using_v_parameterization = true;
} }
if (version == VERSION_SD3_2B || version == VERSION_SD3_5_8B || version == VERSION_SD3_5_2B) { if (sd_version_is_sd3(version)) {
LOG_INFO("running in FLOW mode"); LOG_INFO("running in FLOW mode");
denoiser = std::make_shared<DiscreteFlowDenoiser>(); denoiser = std::make_shared<DiscreteFlowDenoiser>();
} else if (version == VERSION_FLUX_DEV || version == VERSION_FLUX_SCHNELL) { } else if (sd_version_is_flux(version)) {
LOG_INFO("running in Flux FLOW mode"); LOG_INFO("running in Flux FLOW mode");
float shift = 1.15f; float shift = 1.0f; // TODO: validate
if (version == VERSION_FLUX_SCHNELL) { for (auto pair : model_loader.tensor_storages_types) {
shift = 1.0f; // TODO: validate if (pair.first.find("model.diffusion_model.guidance_in.in_layer.weight") != std::string::npos) {
shift = 1.15f;
break;
}
} }
denoiser = std::make_shared<FluxFlowDenoiser>(shift); denoiser = std::make_shared<FluxFlowDenoiser>(shift);
} else if (is_using_v_parameterization) { } else if (is_using_v_parameterization) {
@@ -647,7 +660,7 @@ public:
LOG_WARN("can not find %s for lora %s", st_file_path.c_str(), lora_path.c_str()); LOG_WARN("can not find %s for lora %s", st_file_path.c_str(), lora_path.c_str());
return; return;
} }
LoraModel lora(backend, model_wtype, file_path); LoraModel lora(backend, file_path);
if (!lora.load_from_file()) { if (!lora.load_from_file()) {
LOG_WARN("load lora tensors from %s failed", file_path.c_str()); LOG_WARN("load lora tensors from %s failed", file_path.c_str());
return; return;
@@ -677,7 +690,7 @@ public:
LOG_WARN("can not find %s or %s for lora %s", st_file_path.c_str(), ckpt_file_path.c_str(), lora_name.c_str()); LOG_WARN("can not find %s or %s for lora %s", st_file_path.c_str(), ckpt_file_path.c_str(), lora_name.c_str());
return; return;
} }
LoraModel lora(backend, model_wtype, file_path); LoraModel lora(backend, file_path);
if (!lora.load_from_file()) { if (!lora.load_from_file()) {
LOG_WARN("load lora tensors from %s failed", file_path.c_str()); LOG_WARN("load lora tensors from %s failed", file_path.c_str());
return; return;
@@ -724,10 +737,10 @@ public:
ggml_tensor* id_encoder(ggml_context* work_ctx, ggml_tensor* id_encoder(ggml_context* work_ctx,
ggml_tensor* init_img, ggml_tensor* init_img,
ggml_tensor* prompts_embeds, ggml_tensor* prompts_embeds,
ggml_tensor* id_embeds,
std::vector<bool>& class_tokens_mask) { std::vector<bool>& class_tokens_mask) {
ggml_tensor* res = NULL; ggml_tensor* res = NULL;
pmid_model->compute(n_threads, init_img, prompts_embeds, class_tokens_mask, &res, work_ctx); pmid_model->compute(n_threads, init_img, prompts_embeds, id_embeds, class_tokens_mask, &res, work_ctx);
return res; return res;
} }
@@ -822,7 +835,11 @@ public:
sample_method_t method, sample_method_t method,
const std::vector<float>& sigmas, const std::vector<float>& sigmas,
int start_merge_step, int start_merge_step,
SDCondition id_cond) { SDCondition id_cond,
std::vector<int> skip_layers = {},
float slg_scale = 0,
float skip_layer_start = 0.01,
float skip_layer_end = 0.2) {
size_t steps = sigmas.size() - 1; size_t steps = sigmas.size() - 1;
// noise = load_tensor_from_file(work_ctx, "./rand0.bin"); // noise = load_tensor_from_file(work_ctx, "./rand0.bin");
// print_ggml_tensor(noise); // print_ggml_tensor(noise);
@@ -833,13 +850,24 @@ public:
struct ggml_tensor* noised_input = ggml_dup_tensor(work_ctx, noise); struct ggml_tensor* noised_input = ggml_dup_tensor(work_ctx, noise);
bool has_unconditioned = cfg_scale != 1.0 && uncond.c_crossattn != NULL; bool has_unconditioned = cfg_scale != 1.0 && uncond.c_crossattn != NULL;
bool has_skiplayer = slg_scale != 0.0 && skip_layers.size() > 0;
// denoise wrapper // denoise wrapper
struct ggml_tensor* out_cond = ggml_dup_tensor(work_ctx, x); struct ggml_tensor* out_cond = ggml_dup_tensor(work_ctx, x);
struct ggml_tensor* out_uncond = NULL; struct ggml_tensor* out_uncond = NULL;
struct ggml_tensor* out_skip = NULL;
if (has_unconditioned) { if (has_unconditioned) {
out_uncond = ggml_dup_tensor(work_ctx, x); out_uncond = ggml_dup_tensor(work_ctx, x);
} }
if (has_skiplayer) {
if (sd_version_is_dit(version)) {
out_skip = ggml_dup_tensor(work_ctx, x);
} else {
has_skiplayer = false;
LOG_WARN("SLG is incompatible with %s models", model_version_to_str[version]);
}
}
struct ggml_tensor* denoised = ggml_dup_tensor(work_ctx, x); struct ggml_tensor* denoised = ggml_dup_tensor(work_ctx, x);
auto denoise = [&](ggml_tensor* input, float sigma, int step) -> ggml_tensor* { auto denoise = [&](ggml_tensor* input, float sigma, int step) -> ggml_tensor* {
@@ -920,6 +948,28 @@ public:
&out_uncond); &out_uncond);
negative_data = (float*)out_uncond->data; negative_data = (float*)out_uncond->data;
} }
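// skip-layer guidance (SLG) only runs inside the
// [skip_layer_start, skip_layer_end) fraction of the schedule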
int step_count = sigmas.size();
bool is_skiplayer_step = has_skiplayer && step > (int)(skip_layer_start * step_count) && step < (int)(skip_layer_end * step_count);
float* skip_layer_data = NULL;
if (is_skiplayer_step) {
LOG_DEBUG("Skipping layers at step %d\n", step);
// skip-layer pass (same inputs as the conditioned pass, but with skip_layers applied)
diffusion_model->compute(n_threads,
noised_input,
timesteps,
cond.c_crossattn,
cond.c_concat,
cond.c_vector,
guidance_tensor,
-1,
controls,
control_strength,
&out_skip,
NULL,
skip_layers);
skip_layer_data = (float*)out_skip->data;
}
float* vec_denoised = (float*)denoised->data; float* vec_denoised = (float*)denoised->data;
float* vec_input = (float*)input->data; float* vec_input = (float*)input->data;
float* positive_data = (float*)out_cond->data; float* positive_data = (float*)out_cond->data;
@@ -936,6 +986,9 @@ public:
latent_result = negative_data[i] + cfg_scale * (positive_data[i] - negative_data[i]); latent_result = negative_data[i] + cfg_scale * (positive_data[i] - negative_data[i]);
} }
} }
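// SLG: push the result away from the degraded layer-skipped prediction,
// result += slg_scale * (cond - skip_cond)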
if (is_skiplayer_step) {
latent_result = latent_result + (positive_data[i] - skip_layer_data[i]) * slg_scale;
}
// v = latent_result, eps = latent_result // v = latent_result, eps = latent_result
// denoised = (v * c_out + input * c_skip) or (input + eps * c_out) // denoised = (v * c_out + input * c_skip) or (input + eps * c_out)
vec_denoised[i] = latent_result * c_out + vec_input[i] * c_skip; vec_denoised[i] = latent_result * c_out + vec_input[i] * c_skip;
@@ -999,9 +1052,9 @@ public:
if (use_tiny_autoencoder) { if (use_tiny_autoencoder) {
C = 4; C = 4;
} else { } else {
if (version == VERSION_SD3_2B || version == VERSION_SD3_5_8B || version == VERSION_SD3_5_2B) { if (sd_version_is_sd3(version)) {
C = 32; C = 32;
} else if (version == VERSION_FLUX_DEV || version == VERSION_FLUX_SCHNELL) { } else if (sd_version_is_flux(version)) {
C = 32; C = 32;
} }
} }
@@ -1096,7 +1149,8 @@ sd_ctx_t* new_sd_ctx(const char* model_path_c_str,
enum schedule_t s, enum schedule_t s,
bool keep_clip_on_cpu, bool keep_clip_on_cpu,
bool keep_control_net_cpu, bool keep_control_net_cpu,
bool keep_vae_on_cpu) { bool keep_vae_on_cpu,
bool diffusion_flash_attn) {
sd_ctx_t* sd_ctx = (sd_ctx_t*)malloc(sizeof(sd_ctx_t)); sd_ctx_t* sd_ctx = (sd_ctx_t*)malloc(sizeof(sd_ctx_t));
if (sd_ctx == NULL) { if (sd_ctx == NULL) {
return NULL; return NULL;
@@ -1137,7 +1191,8 @@ sd_ctx_t* new_sd_ctx(const char* model_path_c_str,
s, s,
keep_clip_on_cpu, keep_clip_on_cpu,
keep_control_net_cpu, keep_control_net_cpu,
keep_vae_on_cpu)) { keep_vae_on_cpu,
diffusion_flash_attn)) {
delete sd_ctx->sd; delete sd_ctx->sd;
sd_ctx->sd = NULL; sd_ctx->sd = NULL;
free(sd_ctx); free(sd_ctx);
@@ -1172,7 +1227,11 @@ sd_image_t* generate_image(sd_ctx_t* sd_ctx,
float control_strength, float control_strength,
float style_ratio, float style_ratio,
bool normalize_input, bool normalize_input,
std::string input_id_images_path) { std::string input_id_images_path,
std::vector<int> skip_layers = {},
float slg_scale = 0,
float skip_layer_start = 0.01,
float skip_layer_end = 0.2) {
if (seed < 0) { if (seed < 0) {
// Generally, when using the provided command line, the seed is always >0. // Generally, when using the provided command line, the seed is always >0.
// However, to prevent potential issues if 'stable-diffusion.cpp' is invoked as a library // However, to prevent potential issues if 'stable-diffusion.cpp' is invoked as a library
@@ -1228,11 +1287,15 @@ sd_image_t* generate_image(sd_ctx_t* sd_ctx,
} }
// preprocess input id images // preprocess input id images
std::vector<sd_image_t*> input_id_images; std::vector<sd_image_t*> input_id_images;
bool pmv2 = sd_ctx->sd->pmid_model->get_version() == PM_VERSION_2;
if (sd_ctx->sd->pmid_model && input_id_images_path.size() > 0) { if (sd_ctx->sd->pmid_model && input_id_images_path.size() > 0) {
std::vector<std::string> img_files = get_files_from_dir(input_id_images_path); std::vector<std::string> img_files = get_files_from_dir(input_id_images_path);
for (std::string img_file : img_files) { for (std::string img_file : img_files) {
int c = 0; int c = 0;
int width, height; int width, height;
if (ends_with(img_file, "safetensors")) {
continue;
}
uint8_t* input_image_buffer = stbi_load(img_file.c_str(), &width, &height, &c, 3); uint8_t* input_image_buffer = stbi_load(img_file.c_str(), &width, &height, &c, 3);
if (input_image_buffer == NULL) { if (input_image_buffer == NULL) {
LOG_ERROR("PhotoMaker load image from '%s' failed", img_file.c_str()); LOG_ERROR("PhotoMaker load image from '%s' failed", img_file.c_str());
@@ -1280,10 +1343,15 @@ sd_image_t* generate_image(sd_ctx_t* sd_ctx,
sd_ctx->sd->diffusion_model->get_adm_in_channels()); sd_ctx->sd->diffusion_model->get_adm_in_channels());
id_cond = std::get<0>(cond_tup); id_cond = std::get<0>(cond_tup);
class_tokens_mask = std::get<1>(cond_tup); // class_tokens_mask = std::get<1>(cond_tup); //
struct ggml_tensor* id_embeds = NULL;
id_cond.c_crossattn = sd_ctx->sd->id_encoder(work_ctx, init_img, id_cond.c_crossattn, class_tokens_mask); if (pmv2) {
// id_embeds = sd_ctx->sd->pmid_id_embeds->get();
id_embeds = load_tensor_from_file(work_ctx, path_join(input_id_images_path, "id_embeds.bin"));
// print_ggml_tensor(id_embeds, true, "id_embeds:");
}
id_cond.c_crossattn = sd_ctx->sd->id_encoder(work_ctx, init_img, id_cond.c_crossattn, id_embeds, class_tokens_mask);
t1 = ggml_time_ms(); t1 = ggml_time_ms();
LOG_INFO("Photomaker ID Stacking, taking %d ms", t1 - t0); LOG_INFO("Photomaker ID Stacking, taking %" PRId64 " ms", t1 - t0);
if (sd_ctx->sd->free_params_immediately) { if (sd_ctx->sd->free_params_immediately) {
sd_ctx->sd->pmid_model->free_params_buffer(); sd_ctx->sd->pmid_model->free_params_buffer();
} }
@@ -1348,9 +1416,9 @@ sd_image_t* generate_image(sd_ctx_t* sd_ctx,
// Sample // Sample
std::vector<struct ggml_tensor*> final_latents; // collect latents to decode std::vector<struct ggml_tensor*> final_latents; // collect latents to decode
int C = 4; int C = 4;
if (sd_ctx->sd->version == VERSION_SD3_2B || sd_ctx->sd->version == VERSION_SD3_5_8B || sd_ctx->sd->version == VERSION_SD3_5_2B) { if (sd_version_is_sd3(sd_ctx->sd->version)) {
C = 16; C = 16;
} else if (sd_ctx->sd->version == VERSION_FLUX_DEV || sd_ctx->sd->version == VERSION_FLUX_SCHNELL) { } else if (sd_version_is_flux(sd_ctx->sd->version)) {
C = 16; C = 16;
} }
int W = width / 8; int W = width / 8;
@@ -1387,7 +1455,11 @@ sd_image_t* generate_image(sd_ctx_t* sd_ctx,
sample_method, sample_method,
sigmas, sigmas,
start_merge_step, start_merge_step,
id_cond); id_cond,
skip_layers,
slg_scale,
skip_layer_start,
skip_layer_end);
// struct ggml_tensor* x_0 = load_tensor_from_file(ctx, "samples_ddim.bin"); // struct ggml_tensor* x_0 = load_tensor_from_file(ctx, "samples_ddim.bin");
// print_ggml_tensor(x_0); // print_ggml_tensor(x_0);
int64_t sampling_end = ggml_time_ms(); int64_t sampling_end = ggml_time_ms();
@@ -1453,7 +1525,13 @@ sd_image_t* txt2img(sd_ctx_t* sd_ctx,
float control_strength, float control_strength,
float style_ratio, float style_ratio,
bool normalize_input, bool normalize_input,
const char* input_id_images_path_c_str) { const char* input_id_images_path_c_str,
int* skip_layers = NULL,
size_t skip_layers_count = 0,
float slg_scale = 0,
float skip_layer_start = 0.01,
float skip_layer_end = 0.2) {
std::vector<int> skip_layers_vec(skip_layers, skip_layers + skip_layers_count);
LOG_DEBUG("txt2img %dx%d", width, height); LOG_DEBUG("txt2img %dx%d", width, height);
if (sd_ctx == NULL) { if (sd_ctx == NULL) {
return NULL; return NULL;
@@ -1461,10 +1539,10 @@ sd_image_t* txt2img(sd_ctx_t* sd_ctx,
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
if (sd_ctx->sd->version == VERSION_SD3_2B || sd_ctx->sd->version == VERSION_SD3_5_8B || sd_ctx->sd->version == VERSION_SD3_5_2B) { if (sd_version_is_sd3(sd_ctx->sd->version)) {
params.mem_size *= 3; params.mem_size *= 3;
} }
if (sd_ctx->sd->version == VERSION_FLUX_DEV || sd_ctx->sd->version == VERSION_FLUX_SCHNELL) { if (sd_version_is_flux(sd_ctx->sd->version)) {
params.mem_size *= 4; params.mem_size *= 4;
} }
if (sd_ctx->sd->stacked_id) { if (sd_ctx->sd->stacked_id) {
@@ -1487,17 +1565,17 @@ sd_image_t* txt2img(sd_ctx_t* sd_ctx,
std::vector<float> sigmas = sd_ctx->sd->denoiser->get_sigmas(sample_steps); std::vector<float> sigmas = sd_ctx->sd->denoiser->get_sigmas(sample_steps);
int C = 4; int C = 4;
if (sd_ctx->sd->version == VERSION_SD3_2B || sd_ctx->sd->version == VERSION_SD3_5_8B || sd_ctx->sd->version == VERSION_SD3_5_2B) { if (sd_version_is_sd3(sd_ctx->sd->version)) {
C = 16; C = 16;
} else if (sd_ctx->sd->version == VERSION_FLUX_DEV || sd_ctx->sd->version == VERSION_FLUX_SCHNELL) { } else if (sd_version_is_flux(sd_ctx->sd->version)) {
C = 16; C = 16;
} }
int W = width / 8; int W = width / 8;
int H = height / 8; int H = height / 8;
ggml_tensor* init_latent = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, W, H, C, 1); ggml_tensor* init_latent = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, W, H, C, 1);
if (sd_ctx->sd->version == VERSION_SD3_2B || sd_ctx->sd->version == VERSION_SD3_5_8B || sd_ctx->sd->version == VERSION_SD3_5_2B) { if (sd_version_is_sd3(sd_ctx->sd->version)) {
ggml_set_f32(init_latent, 0.0609f); ggml_set_f32(init_latent, 0.0609f);
} else if (sd_ctx->sd->version == VERSION_FLUX_DEV || sd_ctx->sd->version == VERSION_FLUX_SCHNELL) { } else if (sd_version_is_flux(sd_ctx->sd->version)) {
ggml_set_f32(init_latent, 0.1159f); ggml_set_f32(init_latent, 0.1159f);
} else { } else {
ggml_set_f32(init_latent, 0.f); ggml_set_f32(init_latent, 0.f);
@@ -1521,7 +1599,11 @@ sd_image_t* txt2img(sd_ctx_t* sd_ctx,
control_strength, control_strength,
style_ratio, style_ratio,
normalize_input, normalize_input,
input_id_images_path_c_str); input_id_images_path_c_str,
skip_layers_vec,
slg_scale,
skip_layer_start,
skip_layer_end);
size_t t1 = ggml_time_ms(); size_t t1 = ggml_time_ms();
@@ -1548,7 +1630,13 @@ sd_image_t* img2img(sd_ctx_t* sd_ctx,
float control_strength, float control_strength,
float style_ratio, float style_ratio,
bool normalize_input, bool normalize_input,
const char* input_id_images_path_c_str) { const char* input_id_images_path_c_str,
int* skip_layers = NULL,
size_t skip_layers_count = 0,
float slg_scale = 0,
float skip_layer_start = 0.01,
float skip_layer_end = 0.2) {
std::vector<int> skip_layers_vec(skip_layers, skip_layers + skip_layers_count);
LOG_DEBUG("img2img %dx%d", width, height); LOG_DEBUG("img2img %dx%d", width, height);
if (sd_ctx == NULL) { if (sd_ctx == NULL) {
return NULL; return NULL;
@@ -1556,10 +1644,10 @@ sd_image_t* img2img(sd_ctx_t* sd_ctx,
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
if (sd_ctx->sd->version == VERSION_SD3_2B || sd_ctx->sd->version == VERSION_SD3_5_8B || sd_ctx->sd->version == VERSION_SD3_5_2B) { if (sd_version_is_sd3(sd_ctx->sd->version)) {
params.mem_size *= 2; params.mem_size *= 2;
} }
if (sd_ctx->sd->version == VERSION_FLUX_DEV || sd_ctx->sd->version == VERSION_FLUX_SCHNELL) { if (sd_version_is_flux(sd_ctx->sd->version)) {
params.mem_size *= 3; params.mem_size *= 3;
} }
if (sd_ctx->sd->stacked_id) { if (sd_ctx->sd->stacked_id) {
@@ -1622,7 +1710,11 @@ sd_image_t* img2img(sd_ctx_t* sd_ctx,
control_strength, control_strength,
style_ratio, style_ratio,
normalize_input, normalize_input,
input_id_images_path_c_str); input_id_images_path_c_str,
skip_layers_vec,
slg_scale,
skip_layer_start,
skip_layer_end);
size_t t2 = ggml_time_ms(); size_t t2 = ggml_time_ms();

View file

@@ -150,7 +150,8 @@ SD_API sd_ctx_t* new_sd_ctx(const char* model_path,
enum schedule_t s, enum schedule_t s,
bool keep_clip_on_cpu, bool keep_clip_on_cpu,
bool keep_control_net_cpu, bool keep_control_net_cpu,
bool keep_vae_on_cpu); bool keep_vae_on_cpu,
bool diffusion_flash_attn);
SD_API void free_sd_ctx(sd_ctx_t* sd_ctx); SD_API void free_sd_ctx(sd_ctx_t* sd_ctx);
@@ -170,7 +171,12 @@ SD_API sd_image_t* txt2img(sd_ctx_t* sd_ctx,
float control_strength, float control_strength,
float style_strength, float style_strength,
bool normalize_input, bool normalize_input,
const char* input_id_images_path); const char* input_id_images_path,
int* skip_layers,
size_t skip_layers_count,
float slg_scale,
float skip_layer_start,
float skip_layer_end);
SD_API sd_image_t* img2img(sd_ctx_t* sd_ctx, SD_API sd_image_t* img2img(sd_ctx_t* sd_ctx,
sd_image_t init_image, sd_image_t init_image,
@@ -190,7 +196,12 @@ SD_API sd_image_t* img2img(sd_ctx_t* sd_ctx,
float control_strength, float control_strength,
float style_strength, float style_strength,
bool normalize_input, bool normalize_input,
const char* input_id_images_path); const char* input_id_images_path,
int* skip_layers,
size_t skip_layers_count,
float slg_scale,
float skip_layer_start,
float skip_layer_end);
SD_API sd_image_t* img2vid(sd_ctx_t* sd_ctx, SD_API sd_image_t* img2vid(sd_ctx_t* sd_ctx,
sd_image_t init_image, sd_image_t init_image,
@@ -210,8 +221,7 @@ SD_API sd_image_t* img2vid(sd_ctx_t* sd_ctx,
typedef struct upscaler_ctx_t upscaler_ctx_t; typedef struct upscaler_ctx_t upscaler_ctx_t;
SD_API upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path, SD_API upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path,
int n_threads, int n_threads);
enum sd_type_t wtype);
SD_API void free_upscaler_ctx(upscaler_ctx_t* upscaler_ctx); SD_API void free_upscaler_ctx(upscaler_ctx_t* upscaler_ctx);
SD_API sd_image_t upscale(upscaler_ctx_t* upscaler_ctx, sd_image_t input_image, uint32_t upscale_factor); SD_API sd_image_t upscale(upscaler_ctx_t* upscaler_ctx, sd_image_t input_image, uint32_t upscale_factor);

View file

@@ -441,8 +441,9 @@ protected:
int64_t hidden_size; int64_t hidden_size;
float eps; float eps;
void init_params(struct ggml_context* ctx, ggml_type wtype) { void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
params["weight"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hidden_size); enum ggml_type wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
params["weight"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
} }
public: public:
@@ -717,14 +718,15 @@ struct T5Runner : public GGMLRunner {
std::vector<int> relative_position_bucket_vec; std::vector<int> relative_position_bucket_vec;
T5Runner(ggml_backend_t backend, T5Runner(ggml_backend_t backend,
ggml_type wtype, std::map<std::string, enum ggml_type>& tensor_types,
const std::string prefix,
int64_t num_layers = 24, int64_t num_layers = 24,
int64_t model_dim = 4096, int64_t model_dim = 4096,
int64_t ff_dim = 10240, int64_t ff_dim = 10240,
int64_t num_heads = 64, int64_t num_heads = 64,
int64_t vocab_size = 32128) int64_t vocab_size = 32128)
: GGMLRunner(backend, wtype), model(num_layers, model_dim, ff_dim, num_heads, vocab_size) { : GGMLRunner(backend), model(num_layers, model_dim, ff_dim, num_heads, vocab_size) {
model.init(params_ctx, wtype); model.init(params_ctx, tensor_types, prefix);
} }
std::string get_desc() { std::string get_desc() {
@@ -854,14 +856,17 @@ struct T5Embedder {
     T5UniGramTokenizer tokenizer;
     T5Runner model;
 
+    static std::map<std::string, enum ggml_type> empty_tensor_types;
+
     T5Embedder(ggml_backend_t backend,
-               ggml_type wtype,
+               std::map<std::string, enum ggml_type>& tensor_types = empty_tensor_types,
+               const std::string prefix = "",
                int64_t num_layers = 24,
                int64_t model_dim = 4096,
                int64_t ff_dim = 10240,
                int64_t num_heads = 64,
                int64_t vocab_size = 32128)
-        : model(backend, wtype, num_layers, model_dim, ff_dim, num_heads, vocab_size) {
+        : model(backend, tensor_types, prefix, num_layers, model_dim, ff_dim, num_heads, vocab_size) {
     }
 
     void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
@@ -951,7 +956,7 @@ struct T5Embedder {
         // ggml_backend_t backend = ggml_backend_cuda_init(0);
         ggml_backend_t backend = ggml_backend_cpu_init();
         ggml_type model_data_type = GGML_TYPE_F32;
-        std::shared_ptr<T5Embedder> t5 = std::shared_ptr<T5Embedder>(new T5Embedder(backend, model_data_type));
+        std::shared_ptr<T5Embedder> t5 = std::shared_ptr<T5Embedder>(new T5Embedder(backend));
         {
             LOG_INFO("loading from '%s'", file_path.c_str());
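Because tensor_types and prefix are defaulted, call sites like this test path compile unchanged, while loaders that know the on-disk types can pass them through. A hedged sketch of that fuller form; the prefix string is a placeholder, and only init_from_file and tensor_storages_types are taken from elsewhere in this patch:

    // Hedged sketch; "text_encoders.t5xxl." is a placeholder prefix,
    // chosen for illustration rather than taken from this patch.
    ModelLoader model_loader;
    if (model_loader.init_from_file(file_path)) {
        T5Embedder t5(backend, model_loader.tensor_storages_types, "text_encoders.t5xxl.");
    }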


@@ -188,12 +188,13 @@ struct TinyAutoEncoder : public GGMLRunner {
     bool decode_only = false;
 
     TinyAutoEncoder(ggml_backend_t backend,
-                    ggml_type wtype,
+                    std::map<std::string, enum ggml_type>& tensor_types,
+                    const std::string prefix,
                     bool decoder_only = true)
         : decode_only(decoder_only),
           taesd(decode_only),
-          GGMLRunner(backend, wtype) {
-        taesd.init(params_ctx, wtype);
+          GGMLRunner(backend) {
+        taesd.init(params_ctx, tensor_types, prefix);
     }
 
     std::string get_desc() {


@@ -183,7 +183,7 @@ public:
     int model_channels  = 320;
     int adm_in_channels = 2816;  // only for VERSION_SDXL/SVD
 
-    UnetModelBlock(SDVersion version = VERSION_SD1)
+    UnetModelBlock(SDVersion version = VERSION_SD1, bool flash_attn = false)
         : version(version) {
         if (version == VERSION_SD2) {
             context_dim = 1024;
@@ -242,7 +242,7 @@ public:
         if (version == VERSION_SVD) {
             return new SpatialVideoTransformer(in_channels, n_head, d_head, depth, context_dim);
         } else {
-            return new SpatialTransformer(in_channels, n_head, d_head, depth, context_dim);
+            return new SpatialTransformer(in_channels, n_head, d_head, depth, context_dim, flash_attn);
         }
     };
@@ -532,10 +532,12 @@ struct UNetModelRunner : public GGMLRunner {
     UnetModelBlock unet;
 
     UNetModelRunner(ggml_backend_t backend,
-                    ggml_type wtype,
-                    SDVersion version = VERSION_SD1)
-        : GGMLRunner(backend, wtype), unet(version) {
-        unet.init(params_ctx, wtype);
+                    std::map<std::string, enum ggml_type>& tensor_types,
+                    const std::string prefix,
+                    SDVersion version = VERSION_SD1,
+                    bool flash_attn = false)
+        : GGMLRunner(backend), unet(version, flash_attn) {
+        unet.init(params_ctx, tensor_types, prefix);
     }
 
     std::string get_desc() {
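The flash_attn flag travels from this constructor into UnetModelBlock and on into each SpatialTransformer, which is how the new diffusion_flash_attn context option reaches the attention code. A hedged construction sketch; the prefix and version below are placeholders, not values taken from this patch:

    // Hedged sketch; prefix and version are placeholders.
    UNetModelRunner unet_runner(backend,
                                model_loader.tensor_storages_types,
                                "model.diffusion_model.",
                                VERSION_SDXL,
                                true /* flash_attn */);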


@@ -31,13 +31,17 @@ struct UpscalerGGML {
         LOG_DEBUG("Using SYCL backend");
         backend = ggml_backend_sycl_init(0);
 #endif
+        ModelLoader model_loader;
+        if (!model_loader.init_from_file(esrgan_path)) {
+            LOG_ERROR("init model loader from file failed: '%s'", esrgan_path.c_str());
+        }
+        model_loader.set_wtype_override(model_data_type);
         if (!backend) {
             LOG_DEBUG("Using CPU backend");
             backend = ggml_backend_cpu_init();
         }
         LOG_INFO("Upscaler weight type: %s", ggml_type_name(model_data_type));
-        esrgan_upscaler = std::make_shared<ESRGAN>(backend, model_data_type);
+        esrgan_upscaler = std::make_shared<ESRGAN>(backend, model_loader.tensor_storages_types);
         if (!esrgan_upscaler->load_from_file(esrgan_path)) {
             return false;
         }
@@ -95,8 +99,7 @@ struct upscaler_ctx_t {
 };
 
 upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path_c_str,
-                                 int n_threads,
-                                 enum sd_type_t wtype) {
+                                 int n_threads) {
     upscaler_ctx_t* upscaler_ctx = (upscaler_ctx_t*)malloc(sizeof(upscaler_ctx_t));
     if (upscaler_ctx == NULL) {
         return NULL;


@@ -25,6 +25,7 @@
 #include <unistd.h>
 #endif
 
+#include "ggml-cpu.h"
 #include "ggml.h"
 #include "stable-diffusion.h"
@@ -279,6 +280,23 @@ std::string path_join(const std::string& p1, const std::string& p2) {
     return p1 + "/" + p2;
 }
 
+std::vector<std::string> splitString(const std::string& str, char delimiter) {
+    std::vector<std::string> result;
+    size_t start = 0;
+    size_t end = str.find(delimiter);
+
+    while (end != std::string::npos) {
+        result.push_back(str.substr(start, end - start));
+        start = end + 1;
+        end = str.find(delimiter, start);
+    }
+
+    // Add the last segment after the last delimiter
+    result.push_back(str.substr(start));
+
+    return result;
+}
+
 sd_image_t* preprocess_id_image(sd_image_t* img) {
     int shortest_edge = 224;
     int size = shortest_edge;
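One subtlety worth noting in the new helper: the segment after the last delimiter is pushed unconditionally, so the function always returns at least one element, and an empty input yields a single empty string. Usage sketch:

    // splitString("7,8,9", ',')  ->  {"7", "8", "9"}
    // splitString("", ',')       ->  {""}  (trailing segment is always appended)
    std::vector<std::string> layers = splitString("7,8,9", ',');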


@@ -45,7 +45,7 @@ sd_image_f32_t resize_sd_image_f32_t(sd_image_f32_t image, int target_width, int
 sd_image_f32_t clip_preprocess(sd_image_f32_t image, int size);
 std::string path_join(const std::string& p1, const std::string& p2);
+std::vector<std::string> splitString(const std::string& str, char delimiter);
 
 void pretty_progress(int step, int steps, float time);
 void log_printf(sd_log_level_t level, const char* file, int line, const char* format, ...);


@@ -163,8 +163,9 @@ public:
 class VideoResnetBlock : public ResnetBlock {
 protected:
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["mix_factor"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type wtype = (tensor_types.find(prefix + "mix_factor") != tensor_types.end()) ? tensor_types[prefix + "mix_factor"] : GGML_TYPE_F32;
+        params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
     }
 
     float get_alpha() {
@@ -457,7 +458,7 @@ public:
                        bool use_video_decoder = false,
                        SDVersion version = VERSION_SD1)
         : decode_only(decode_only), use_video_decoder(use_video_decoder) {
-        if (version == VERSION_SD3_2B || version == VERSION_SD3_5_8B || version == VERSION_SD3_5_2B || version == VERSION_FLUX_DEV || version == VERSION_FLUX_SCHNELL) {
+        if (sd_version_is_dit(version)) {
             dd_config.z_channels = 16;
             use_quant = false;
         }
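Folding the enumeration into sd_version_is_dit keeps the VAE agnostic of which DiT-family models exist. Judging purely from the condition it replaces, the helper presumably reduces to something like the sketch below; its real definition lives elsewhere in the patch:

    // Inferred from the removed condition; the actual helper is defined elsewhere.
    static inline bool sd_version_is_dit(SDVersion version) {
        return version == VERSION_SD3_2B || version == VERSION_SD3_5_8B ||
               version == VERSION_SD3_5_2B || version == VERSION_FLUX_DEV ||
               version == VERSION_FLUX_SCHNELL;
    }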
@@ -524,12 +525,13 @@ struct AutoEncoderKL : public GGMLRunner {
     AutoencodingEngine ae;
 
     AutoEncoderKL(ggml_backend_t backend,
-                  ggml_type wtype,
+                  std::map<std::string, enum ggml_type>& tensor_types,
+                  const std::string prefix,
                   bool decode_only = false,
                   bool use_video_decoder = false,
                   SDVersion version = VERSION_SD1)
-        : decode_only(decode_only), ae(decode_only, use_video_decoder, version), GGMLRunner(backend, wtype) {
-        ae.init(params_ctx, wtype);
+        : decode_only(decode_only), ae(decode_only, use_video_decoder, version), GGMLRunner(backend) {
+        ae.init(params_ctx, tensor_types, prefix);
     }
 
     std::string get_desc() {