diff --git a/ldm_patched/contrib/external.py b/ldm_patched/contrib/external.py
index 9d2238d..927cd3f 100644
--- a/ldm_patched/contrib/external.py
+++ b/ldm_patched/contrib/external.py
@@ -361,6 +361,62 @@ class VAEEncodeForInpaint:
return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )
+
+class InpaintModelConditioning:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"positive": ("CONDITIONING", ),
+ "negative": ("CONDITIONING", ),
+ "vae": ("VAE", ),
+ "pixels": ("IMAGE", ),
+ "mask": ("MASK", ),
+ }}
+
+ RETURN_TYPES = ("CONDITIONING","CONDITIONING","LATENT")
+ RETURN_NAMES = ("positive", "negative", "latent")
+ FUNCTION = "encode"
+
+ CATEGORY = "conditioning/inpaint"
+
+ def encode(self, positive, negative, pixels, vae, mask):
+ x = (pixels.shape[1] // 8) * 8
+ y = (pixels.shape[2] // 8) * 8
+ mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")
+
+ orig_pixels = pixels
+ pixels = orig_pixels.clone()
+ if pixels.shape[1] != x or pixels.shape[2] != y:
+ x_offset = (pixels.shape[1] % 8) // 2
+ y_offset = (pixels.shape[2] % 8) // 2
+ pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
+ mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]
+
+ m = (1.0 - mask.round()).squeeze(1)
+ for i in range(3):
+ pixels[:,:,:,i] -= 0.5
+ pixels[:,:,:,i] *= m
+ pixels[:,:,:,i] += 0.5
+ concat_latent = vae.encode(pixels)
+ orig_latent = vae.encode(orig_pixels)
+
+ out_latent = {}
+
+ out_latent["samples"] = orig_latent
+ out_latent["noise_mask"] = mask
+
+ out = []
+ for conditioning in [positive, negative]:
+ c = []
+ for t in conditioning:
+ d = t[1].copy()
+ d["concat_latent_image"] = concat_latent
+ d["concat_mask"] = mask
+ n = [t[0], d]
+ c.append(n)
+ out.append(c)
+ return (out[0], out[1], out_latent)
+
+
class SaveLatent:
def __init__(self):
self.output_dir = ldm_patched.utils.path_utils.get_output_directory()
@@ -1417,6 +1473,8 @@ class LoadImage:
output_masks = []
for i in ImageSequence.Iterator(img):
i = ImageOps.exif_transpose(i)
+ if i.mode == 'I':
+ i = i.point(lambda i: i * (1 / 255))
image = i.convert("RGB")
image = np.array(image).astype(np.float32) / 255.0
image = torch.from_numpy(image)[None,]
@@ -1472,6 +1530,8 @@ class LoadImageMask:
i = Image.open(image_path)
i = ImageOps.exif_transpose(i)
if i.getbands() != ("R", "G", "B", "A"):
+ if i.mode == 'I':
+ i = i.point(lambda i: i * (1 / 255))
i = i.convert("RGBA")
mask = None
c = channel[0].upper()
@@ -1626,10 +1686,11 @@ class ImagePadForOutpaint:
def expand_image(self, image, left, top, right, bottom, feathering):
d1, d2, d3, d4 = image.size()
- new_image = torch.zeros(
+ new_image = torch.ones(
(d1, d2 + top + bottom, d3 + left + right, d4),
dtype=torch.float32,
- )
+ ) * 0.5
+
new_image[:, top:top + d2, left:left + d3, :] = image
mask = torch.ones(
@@ -1721,6 +1782,7 @@ NODE_CLASS_MAPPINGS = {
"unCLIPCheckpointLoader": unCLIPCheckpointLoader,
"GLIGENLoader": GLIGENLoader,
"GLIGENTextBoxApply": GLIGENTextBoxApply,
+ "InpaintModelConditioning": InpaintModelConditioning,
"CheckpointLoader": CheckpointLoader,
"DiffusersLoader": DiffusersLoader,
@@ -1882,6 +1944,8 @@ def init_custom_nodes():
"nodes_sag.py",
"nodes_perpneg.py",
"nodes_stable3d.py",
+ "nodes_sdupscale.py",
+ "nodes_photomaker.py",
]
for node_file in extras_files:
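
The masking arithmetic in the InpaintModelConditioning hunk above re-centers pixels around 0.5 and zeroes out the masked region, so the VAE encodes mid-gray wherever content is to be regenerated. A minimal standalone sketch of that step, with assumed tensor sizes rather than the node API:

```python
import torch

# pixels: [B, H, W, C] in 0..1; mask: [B, 1, H, W] with 1.0 = region to inpaint.
pixels = torch.rand(1, 64, 64, 3)
mask = torch.zeros(1, 1, 64, 64)
mask[:, :, 16:48, 16:48] = 1.0

m = (1.0 - mask.round()).squeeze(1)              # [B, H, W]: 0 inside the hole, 1 outside
masked = (pixels - 0.5) * m.unsqueeze(-1) + 0.5  # same math as the per-channel loop

# Inside the hole every channel collapses to 0.5 (mid-gray); outside is untouched.
assert torch.allclose(masked[0, 32, 32], torch.tensor([0.5, 0.5, 0.5]))
assert torch.allclose(masked[0, 0, 0], pixels[0, 0, 0])
```
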
diff --git a/ldm_patched/contrib/external_custom_sampler.py b/ldm_patched/contrib/external_custom_sampler.py
index 6e5a769..8f92e84 100644
--- a/ldm_patched/contrib/external_custom_sampler.py
+++ b/ldm_patched/contrib/external_custom_sampler.py
@@ -15,6 +15,7 @@ class BasicScheduler:
{"model": ("MODEL",),
"scheduler": (ldm_patched.modules.samplers.SCHEDULER_NAMES, ),
"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+ "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
}
}
RETURN_TYPES = ("SIGMAS",)
@@ -22,8 +23,14 @@ class BasicScheduler:
FUNCTION = "get_sigmas"
- def get_sigmas(self, model, scheduler, steps):
- sigmas = ldm_patched.modules.samplers.calculate_sigmas_scheduler(model.model, scheduler, steps).cpu()
+ def get_sigmas(self, model, scheduler, steps, denoise):
+ total_steps = steps
+ if denoise < 1.0:
+ total_steps = int(steps/denoise)
+
+ ldm_patched.modules.model_management.load_models_gpu([model])
+ sigmas = ldm_patched.modules.samplers.calculate_sigmas_scheduler(model.model, scheduler, total_steps).cpu()
+ sigmas = sigmas[-(steps + 1):]
return (sigmas, )
@@ -100,6 +107,7 @@ class SDTurboScheduler:
def get_sigmas(self, model, steps, denoise):
start_step = 10 - int(10 * denoise)
timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[start_step:start_step + steps]
+ ldm_patched.modules.model_management.load_models_gpu([model])
sigmas = model.model.model_sampling.sigma(timesteps)
sigmas = torch.cat([sigmas, sigmas.new_zeros([1])])
return (sigmas, )
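
The denoise input added to BasicScheduler works by computing a longer schedule, int(steps / denoise) steps, and keeping only its low-noise tail, which is what the sigmas[-(steps + 1):] slice does. A rough numeric sketch with an assumed linear schedule (real sigmas come from the loaded model):

```python
# Assumed linear schedule purely for illustration.
steps, denoise, sigma_max = 20, 0.5, 14.6
total_steps = steps if denoise >= 1.0 else int(steps / denoise)  # 40

# total_steps + 1 sigmas from sigma_max down to 0.
sigmas = [sigma_max * (1 - i / total_steps) for i in range(total_steps + 1)]

# Keep the last steps + 1 entries: sampling starts partway through denoising.
tail = sigmas[-(steps + 1):]
print(len(tail), tail[0], tail[-1])  # 21 7.3 0.0
```
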
diff --git a/ldm_patched/contrib/external_freelunch.py b/ldm_patched/contrib/external_freelunch.py
index f8dd5a4..59ec5ba 100644
--- a/ldm_patched/contrib/external_freelunch.py
+++ b/ldm_patched/contrib/external_freelunch.py
@@ -36,7 +36,7 @@ class FreeU:
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"
- CATEGORY = "_for_testing"
+ CATEGORY = "model_patches"
def patch(self, model, b1, b2, s1, s2):
model_channels = model.model.model_config.unet_config["model_channels"]
@@ -75,7 +75,7 @@ class FreeU_V2:
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"
- CATEGORY = "_for_testing"
+ CATEGORY = "model_patches"
def patch(self, model, b1, b2, s1, s2):
model_channels = model.model.model_config.unet_config["model_channels"]
diff --git a/ldm_patched/contrib/external_hypertile.py b/ldm_patched/contrib/external_hypertile.py
index 45f7c3e..5cf7d9d 100644
--- a/ldm_patched/contrib/external_hypertile.py
+++ b/ldm_patched/contrib/external_hypertile.py
@@ -34,29 +34,29 @@ class HyperTile:
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"
- CATEGORY = "_for_testing"
+ CATEGORY = "model_patches"
def patch(self, model, tile_size, swap_size, max_depth, scale_depth):
model_channels = model.model.model_config.unet_config["model_channels"]
- apply_to = set()
- temp = model_channels
- for x in range(max_depth + 1):
- apply_to.add(temp)
- temp *= 2
-
latent_tile_size = max(32, tile_size) // 8
self.temp = None
def hypertile_in(q, k, v, extra_options):
- if q.shape[-1] in apply_to:
+ model_chans = q.shape[-2]
+ orig_shape = extra_options['original_shape']
+ apply_to = []
+ for i in range(max_depth + 1):
+ apply_to.append((orig_shape[-2] / (2 ** i)) * (orig_shape[-1] / (2 ** i)))
+
+ if model_chans in apply_to:
shape = extra_options["original_shape"]
aspect_ratio = shape[-1] / shape[-2]
hw = q.size(1)
h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))
- factor = 2**((q.shape[-1] // model_channels) - 1) if scale_depth else 1
+ factor = (2 ** apply_to.index(model_chans)) if scale_depth else 1
nh = random_divisor(h, latent_tile_size * factor, swap_size)
nw = random_divisor(w, latent_tile_size * factor, swap_size)
diff --git a/ldm_patched/contrib/external_latent.py b/ldm_patched/contrib/external_latent.py
index c6f874e..6d753d0 100644
--- a/ldm_patched/contrib/external_latent.py
+++ b/ldm_patched/contrib/external_latent.py
@@ -124,10 +124,34 @@ class LatentBatch:
samples_out["batch_index"] = samples1.get("batch_index", [x for x in range(0, s1.shape[0])]) + samples2.get("batch_index", [x for x in range(0, s2.shape[0])])
return (samples_out,)
+class LatentBatchSeedBehavior:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "samples": ("LATENT",),
+ "seed_behavior": (["random", "fixed"],),}}
+
+ RETURN_TYPES = ("LATENT",)
+ FUNCTION = "op"
+
+ CATEGORY = "latent/advanced"
+
+ def op(self, samples, seed_behavior):
+ samples_out = samples.copy()
+ latent = samples["samples"]
+ if seed_behavior == "random":
+ if 'batch_index' in samples_out:
+ samples_out.pop('batch_index')
+ elif seed_behavior == "fixed":
+ batch_number = samples_out.get("batch_index", [0])[0]
+ samples_out["batch_index"] = [batch_number] * latent.shape[0]
+
+ return (samples_out,)
+
NODE_CLASS_MAPPINGS = {
"LatentAdd": LatentAdd,
"LatentSubtract": LatentSubtract,
"LatentMultiply": LatentMultiply,
"LatentInterpolate": LatentInterpolate,
"LatentBatch": LatentBatch,
+ "LatentBatchSeedBehavior": LatentBatchSeedBehavior,
}
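
In the LatentBatchSeedBehavior node above, "fixed" pins every latent in the batch to one batch index so the sampler reuses the same per-index noise, while "random" simply drops the key. A minimal sketch of the "fixed" bookkeeping, with an assumed batch of four empty latents:

```python
import torch

samples = {"samples": torch.zeros(4, 4, 64, 64)}  # assumed batch of 4 latents

out = samples.copy()
batch_number = out.get("batch_index", [0])[0]
out["batch_index"] = [batch_number] * samples["samples"].shape[0]

print(out["batch_index"])  # [0, 0, 0, 0] -> all four share one noise index
```
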
diff --git a/ldm_patched/contrib/external_model_merging.py b/ldm_patched/contrib/external_model_merging.py
index c0cf9af..ae8145d 100644
--- a/ldm_patched/contrib/external_model_merging.py
+++ b/ldm_patched/contrib/external_model_merging.py
@@ -121,6 +121,48 @@ class ModelMergeBlocks:
m.add_patches({k: kp[k]}, 1.0 - ratio, ratio)
return (m, )
+def save_checkpoint(model, clip=None, vae=None, clip_vision=None, filename_prefix=None, output_dir=None, prompt=None, extra_pnginfo=None):
+ full_output_folder, filename, counter, subfolder, filename_prefix = ldm_patched.utils.path_utils.get_save_image_path(filename_prefix, output_dir)
+ prompt_info = ""
+ if prompt is not None:
+ prompt_info = json.dumps(prompt)
+
+ metadata = {}
+
+ enable_modelspec = True
+ if isinstance(model.model, ldm_patched.modules.model_base.SDXL):
+ metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-base"
+ elif isinstance(model.model, ldm_patched.modules.model_base.SDXLRefiner):
+ metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-refiner"
+ else:
+ enable_modelspec = False
+
+ if enable_modelspec:
+ metadata["modelspec.sai_model_spec"] = "1.0.0"
+ metadata["modelspec.implementation"] = "sgm"
+ metadata["modelspec.title"] = "{} {}".format(filename, counter)
+
+ #TODO:
+ # "stable-diffusion-v1", "stable-diffusion-v1-inpainting", "stable-diffusion-v2-512",
+ # "stable-diffusion-v2-768-v", "stable-diffusion-v2-unclip-l", "stable-diffusion-v2-unclip-h",
+ # "v2-inpainting"
+
+ if model.model.model_type == ldm_patched.modules.model_base.ModelType.EPS:
+ metadata["modelspec.predict_key"] = "epsilon"
+ elif model.model.model_type == ldm_patched.modules.model_base.ModelType.V_PREDICTION:
+ metadata["modelspec.predict_key"] = "v"
+
+ if not args.disable_server_info:
+ metadata["prompt"] = prompt_info
+ if extra_pnginfo is not None:
+ for x in extra_pnginfo:
+ metadata[x] = json.dumps(extra_pnginfo[x])
+
+ output_checkpoint = f"{filename}_{counter:05}_.safetensors"
+ output_checkpoint = os.path.join(full_output_folder, output_checkpoint)
+
+ ldm_patched.modules.sd.save_checkpoint(output_checkpoint, model, clip, vae, clip_vision, metadata=metadata)
+
class CheckpointSave:
def __init__(self):
self.output_dir = ldm_patched.utils.path_utils.get_output_directory()
@@ -139,46 +181,7 @@ class CheckpointSave:
CATEGORY = "advanced/model_merging"
def save(self, model, clip, vae, filename_prefix, prompt=None, extra_pnginfo=None):
- full_output_folder, filename, counter, subfolder, filename_prefix = ldm_patched.utils.path_utils.get_save_image_path(filename_prefix, self.output_dir)
- prompt_info = ""
- if prompt is not None:
- prompt_info = json.dumps(prompt)
-
- metadata = {}
-
- enable_modelspec = True
- if isinstance(model.model, ldm_patched.modules.model_base.SDXL):
- metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-base"
- elif isinstance(model.model, ldm_patched.modules.model_base.SDXLRefiner):
- metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-refiner"
- else:
- enable_modelspec = False
-
- if enable_modelspec:
- metadata["modelspec.sai_model_spec"] = "1.0.0"
- metadata["modelspec.implementation"] = "sgm"
- metadata["modelspec.title"] = "{} {}".format(filename, counter)
-
- #TODO:
- # "stable-diffusion-v1", "stable-diffusion-v1-inpainting", "stable-diffusion-v2-512",
- # "stable-diffusion-v2-768-v", "stable-diffusion-v2-unclip-l", "stable-diffusion-v2-unclip-h",
- # "v2-inpainting"
-
- if model.model.model_type == ldm_patched.modules.model_base.ModelType.EPS:
- metadata["modelspec.predict_key"] = "epsilon"
- elif model.model.model_type == ldm_patched.modules.model_base.ModelType.V_PREDICTION:
- metadata["modelspec.predict_key"] = "v"
-
- if not args.disable_server_info:
- metadata["prompt"] = prompt_info
- if extra_pnginfo is not None:
- for x in extra_pnginfo:
- metadata[x] = json.dumps(extra_pnginfo[x])
-
- output_checkpoint = f"{filename}_{counter:05}_.safetensors"
- output_checkpoint = os.path.join(full_output_folder, output_checkpoint)
-
- ldm_patched.modules.sd.save_checkpoint(output_checkpoint, model, clip, vae, metadata=metadata)
+ save_checkpoint(model, clip=clip, vae=vae, filename_prefix=filename_prefix, output_dir=self.output_dir, prompt=prompt, extra_pnginfo=extra_pnginfo)
return {}
class CLIPSave:
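
The refactor above lifts the body of CheckpointSave.save into a module-level save_checkpoint, adding a clip_vision parameter so ImageOnlyCheckpointSave (later in this diff) can reuse it without a text CLIP. A sketch of the metadata it assembles, assuming an SDXL base model with epsilon prediction and a dummy workflow:

```python
import json

filename, counter = "ldm_patched", 1  # assumed values from get_save_image_path
metadata = {
    "modelspec.architecture": "stable-diffusion-xl-v1-base",
    "modelspec.sai_model_spec": "1.0.0",
    "modelspec.implementation": "sgm",
    "modelspec.title": "{} {}".format(filename, counter),
    "modelspec.predict_key": "epsilon",
}
# Workflow info rides along as JSON strings when args.disable_server_info is false.
metadata["prompt"] = json.dumps({"example": "workflow"})
print(json.dumps(metadata, indent=2))
```
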
diff --git a/ldm_patched/contrib/external_photomaker.py b/ldm_patched/contrib/external_photomaker.py
new file mode 100644
index 0000000..cc7f671
--- /dev/null
+++ b/ldm_patched/contrib/external_photomaker.py
@@ -0,0 +1,191 @@
+# https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py
+
+import torch
+import torch.nn as nn
+import ldm_patched.utils.path_utils
+import ldm_patched.modules.clip_model
+import ldm_patched.modules.clip_vision
+import ldm_patched.modules.model_management
+import ldm_patched.modules.utils
+import ldm_patched.modules.ops
+
+# code for model from: https://github.com/TencentARC/PhotoMaker/blob/main/photomaker/model.py under Apache License Version 2.0
+VISION_CONFIG_DICT = {
+ "hidden_size": 1024,
+ "image_size": 224,
+ "intermediate_size": 4096,
+ "num_attention_heads": 16,
+ "num_channels": 3,
+ "num_hidden_layers": 24,
+ "patch_size": 14,
+ "projection_dim": 768,
+ "hidden_act": "quick_gelu",
+}
+
+class MLP(nn.Module):
+ def __init__(self, in_dim, out_dim, hidden_dim, use_residual=True, operations=ldm_patched.modules.ops):
+ super().__init__()
+ if use_residual:
+ assert in_dim == out_dim
+ self.layernorm = operations.LayerNorm(in_dim)
+ self.fc1 = operations.Linear(in_dim, hidden_dim)
+ self.fc2 = operations.Linear(hidden_dim, out_dim)
+ self.use_residual = use_residual
+ self.act_fn = nn.GELU()
+
+ def forward(self, x):
+ residual = x
+ x = self.layernorm(x)
+ x = self.fc1(x)
+ x = self.act_fn(x)
+ x = self.fc2(x)
+ if self.use_residual:
+ x = x + residual
+ return x
+
+
+class FuseModule(nn.Module):
+ def __init__(self, embed_dim, operations):
+ super().__init__()
+ self.mlp1 = MLP(embed_dim * 2, embed_dim, embed_dim, use_residual=False, operations=operations)
+ self.mlp2 = MLP(embed_dim, embed_dim, embed_dim, use_residual=True, operations=operations)
+ self.layer_norm = operations.LayerNorm(embed_dim)
+
+ def fuse_fn(self, prompt_embeds, id_embeds):
+ stacked_id_embeds = torch.cat([prompt_embeds, id_embeds], dim=-1)
+ stacked_id_embeds = self.mlp1(stacked_id_embeds) + prompt_embeds
+ stacked_id_embeds = self.mlp2(stacked_id_embeds)
+ stacked_id_embeds = self.layer_norm(stacked_id_embeds)
+ return stacked_id_embeds
+
+ def forward(
+ self,
+ prompt_embeds,
+ id_embeds,
+ class_tokens_mask,
+ ) -> torch.Tensor:
+ # id_embeds shape: [b, max_num_inputs, 1, 2048]
+ id_embeds = id_embeds.to(prompt_embeds.dtype)
+ num_inputs = class_tokens_mask.sum().unsqueeze(0) # TODO: check for training case
+ batch_size, max_num_inputs = id_embeds.shape[:2]
+ # seq_length: 77
+ seq_length = prompt_embeds.shape[1]
+ # flat_id_embeds shape: [b*max_num_inputs, 1, 2048]
+ flat_id_embeds = id_embeds.view(
+ -1, id_embeds.shape[-2], id_embeds.shape[-1]
+ )
+ # valid_id_mask [b*max_num_inputs]
+ valid_id_mask = (
+ torch.arange(max_num_inputs, device=flat_id_embeds.device)[None, :]
+ < num_inputs[:, None]
+ )
+ valid_id_embeds = flat_id_embeds[valid_id_mask.flatten()]
+
+ prompt_embeds = prompt_embeds.view(-1, prompt_embeds.shape[-1])
+ class_tokens_mask = class_tokens_mask.view(-1)
+ valid_id_embeds = valid_id_embeds.view(-1, valid_id_embeds.shape[-1])
+ # slice out the image token embeddings
+ image_token_embeds = prompt_embeds[class_tokens_mask]
+ stacked_id_embeds = self.fuse_fn(image_token_embeds, valid_id_embeds)
+ assert class_tokens_mask.sum() == stacked_id_embeds.shape[0], f"{class_tokens_mask.sum()} != {stacked_id_embeds.shape[0]}"
+ prompt_embeds.masked_scatter_(class_tokens_mask[:, None], stacked_id_embeds.to(prompt_embeds.dtype))
+ updated_prompt_embeds = prompt_embeds.view(batch_size, seq_length, -1)
+ return updated_prompt_embeds
+
+class PhotoMakerIDEncoder(ldm_patched.modules.clip_model.CLIPVisionModelProjection):
+ def __init__(self):
+ self.load_device = ldm_patched.modules.model_management.text_encoder_device()
+ offload_device = ldm_patched.modules.model_management.text_encoder_offload_device()
+ dtype = ldm_patched.modules.model_management.text_encoder_dtype(self.load_device)
+
+ super().__init__(VISION_CONFIG_DICT, dtype, offload_device, ldm_patched.modules.ops.manual_cast)
+ self.visual_projection_2 = ldm_patched.modules.ops.manual_cast.Linear(1024, 1280, bias=False)
+ self.fuse_module = FuseModule(2048, ldm_patched.modules.ops.manual_cast)
+
+ def forward(self, id_pixel_values, prompt_embeds, class_tokens_mask):
+ b, num_inputs, c, h, w = id_pixel_values.shape
+ id_pixel_values = id_pixel_values.view(b * num_inputs, c, h, w)
+
+ shared_id_embeds = self.vision_model(id_pixel_values)[2]
+ id_embeds = self.visual_projection(shared_id_embeds)
+ id_embeds_2 = self.visual_projection_2(shared_id_embeds)
+
+ id_embeds = id_embeds.view(b, num_inputs, 1, -1)
+ id_embeds_2 = id_embeds_2.view(b, num_inputs, 1, -1)
+
+ id_embeds = torch.cat((id_embeds, id_embeds_2), dim=-1)
+ updated_prompt_embeds = self.fuse_module(prompt_embeds, id_embeds, class_tokens_mask)
+
+ return updated_prompt_embeds
+
+
+class PhotoMakerLoader:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "photomaker_model_name": (ldm_patched.utils.path_utils.get_filename_list("photomaker"), )}}
+
+ RETURN_TYPES = ("PHOTOMAKER",)
+ FUNCTION = "load_photomaker_model"
+
+ CATEGORY = "_for_testing/photomaker"
+
+ def load_photomaker_model(self, photomaker_model_name):
+ photomaker_model_path = ldm_patched.utils.path_utils.get_full_path("photomaker", photomaker_model_name)
+ photomaker_model = PhotoMakerIDEncoder()
+ data = ldm_patched.modules.utils.load_torch_file(photomaker_model_path, safe_load=True)
+ if "id_encoder" in data:
+ data = data["id_encoder"]
+ photomaker_model.load_state_dict(data)
+ return (photomaker_model,)
+
+
+class PhotoMakerEncode:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "photomaker": ("PHOTOMAKER",),
+ "image": ("IMAGE",),
+ "clip": ("CLIP", ),
+ "text": ("STRING", {"multiline": True, "default": "photograph of photomaker"}),
+ }}
+
+ RETURN_TYPES = ("CONDITIONING",)
+ FUNCTION = "apply_photomaker"
+
+ CATEGORY = "_for_testing/photomaker"
+
+ def apply_photomaker(self, photomaker, image, clip, text):
+ special_token = "photomaker"
+ pixel_values = ldm_patched.modules.clip_vision.clip_preprocess(image.to(photomaker.load_device)).float()
+ try:
+ index = text.split(" ").index(special_token) + 1
+ except ValueError:
+ index = -1
+ tokens = clip.tokenize(text, return_word_ids=True)
+ out_tokens = {}
+ for k in tokens:
+ out_tokens[k] = []
+ for t in tokens[k]:
+ f = list(filter(lambda x: x[2] != index, t))
+ while len(f) < len(t):
+ f.append(t[-1])
+ out_tokens[k].append(f)
+
+ cond, pooled = clip.encode_from_tokens(out_tokens, return_pooled=True)
+
+ if index > 0:
+ token_index = index - 1
+ num_id_images = 1
+ class_tokens_mask = [True if token_index <= i < token_index+num_id_images else False for i in range(77)]
+ out = photomaker(id_pixel_values=pixel_values.unsqueeze(0), prompt_embeds=cond.to(photomaker.load_device),
+ class_tokens_mask=torch.tensor(class_tokens_mask, dtype=torch.bool, device=photomaker.load_device).unsqueeze(0))
+ else:
+ out = cond
+
+ return ([[out, {"pooled_output": pooled}]], )
+
+
+NODE_CLASS_MAPPINGS = {
+ "PhotoMakerLoader": PhotoMakerLoader,
+ "PhotoMakerEncode": PhotoMakerEncode,
+}
+
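
A sketch of how PhotoMakerEncode above locates the trigger word and builds class_tokens_mask, using an assumed prompt; in the node, the masked positions are where the fuse module scatters the ID embeddings into the 77-token prompt embedding:

```python
import torch

text = "photograph of photomaker in a garden"  # assumed prompt
special_token = "photomaker"

# 1-based word index, matching the tokenizer word ids filtered above.
index = text.split(" ").index(special_token) + 1  # 3

token_index, num_id_images = index - 1, 1
class_tokens_mask = [token_index <= i < token_index + num_id_images
                     for i in range(77)]
mask = torch.tensor(class_tokens_mask, dtype=torch.bool).unsqueeze(0)  # [1, 77]
print(mask.sum().item(), mask[0, token_index].item())  # 1 True
```
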
diff --git a/ldm_patched/contrib/external_post_processing.py b/ldm_patched/contrib/external_post_processing.py
index 432c53f..93cb121 100644
--- a/ldm_patched/contrib/external_post_processing.py
+++ b/ldm_patched/contrib/external_post_processing.py
@@ -35,6 +35,7 @@ class Blend:
CATEGORY = "image/postprocessing"
def blend_images(self, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str):
+ image2 = image2.to(image1.device)
if image1.shape != image2.shape:
image2 = image2.permute(0, 3, 1, 2)
image2 = ldm_patched.modules.utils.common_upscale(image2, image1.shape[2], image1.shape[1], upscale_method='bicubic', crop='center')
diff --git a/ldm_patched/contrib/external_sag.py b/ldm_patched/contrib/external_sag.py
index 9cffe87..804d561 100644
--- a/ldm_patched/contrib/external_sag.py
+++ b/ldm_patched/contrib/external_sag.py
@@ -60,7 +60,7 @@ def create_blur_map(x0, attn, sigma=3.0, threshold=1.0):
attn = attn.reshape(b, -1, hw1, hw2)
# Global Average Pool
mask = attn.mean(1, keepdim=False).sum(1, keepdim=False) > threshold
- ratio = math.ceil(math.sqrt(lh * lw / hw1))
+ ratio = 2**(math.ceil(math.sqrt(lh * lw / hw1)) - 1).bit_length()
mid_shape = [math.ceil(lh / ratio), math.ceil(lw / ratio)]
# Reshape
@@ -145,6 +145,8 @@ class SelfAttentionGuidance:
sigma = args["sigma"]
model_options = args["model_options"]
x = args["input"]
+ if min(cfg_result.shape[2:]) <= 4: #skip when too small to add padding
+ return cfg_result
# create the adversarially blurred image
degraded = create_blur_map(uncond_pred, uncond_attn, sag_sigma, sag_threshold)
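
The SAG fix above rounds the blur-map downscale ratio up to the next power of two (the (n - 1).bit_length() trick) instead of using the raw ceil, so mid_shape divides the latent cleanly. A quick numeric check with assumed latent and attention sizes:

```python
import math

# (lh, lw) = latent size, hw1 = attention map size; assumed values.
for lh, lw, hw1 in [(64, 64, 4096), (96, 96, 1024), (128, 128, 1024)]:
    raw = math.ceil(math.sqrt(lh * lw / hw1))
    ratio = 2 ** (raw - 1).bit_length()  # next power of two >= raw
    print(raw, "->", ratio)              # 1 -> 1, 3 -> 4, 4 -> 4
```
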
diff --git a/ldm_patched/contrib/external_sdupscale.py b/ldm_patched/contrib/external_sdupscale.py
new file mode 100644
index 0000000..68153c4
--- /dev/null
+++ b/ldm_patched/contrib/external_sdupscale.py
@@ -0,0 +1,49 @@
+# https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py
+
+import torch
+import ldm_patched.contrib.external
+import ldm_patched.modules.utils
+
+class SD_4XUpscale_Conditioning:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "images": ("IMAGE",),
+ "positive": ("CONDITIONING",),
+ "negative": ("CONDITIONING",),
+ "scale_ratio": ("FLOAT", {"default": 4.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
+ }}
+ RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
+ RETURN_NAMES = ("positive", "negative", "latent")
+
+ FUNCTION = "encode"
+
+ CATEGORY = "conditioning/upscale_diffusion"
+
+ def encode(self, images, positive, negative, scale_ratio, noise_augmentation):
+ width = max(1, round(images.shape[-2] * scale_ratio))
+ height = max(1, round(images.shape[-3] * scale_ratio))
+
+ pixels = ldm_patched.modules.utils.common_upscale((images.movedim(-1,1) * 2.0) - 1.0, width // 4, height // 4, "bilinear", "center")
+
+ out_cp = []
+ out_cn = []
+
+ for t in positive:
+ n = [t[0], t[1].copy()]
+ n[1]['concat_image'] = pixels
+ n[1]['noise_augmentation'] = noise_augmentation
+ out_cp.append(n)
+
+ for t in negative:
+ n = [t[0], t[1].copy()]
+ n[1]['concat_image'] = pixels
+ n[1]['noise_augmentation'] = noise_augmentation
+ out_cn.append(n)
+
+ latent = torch.zeros([images.shape[0], 4, height // 4, width // 4])
+ return (out_cp, out_cn, {"samples":latent})
+
+NODE_CLASS_MAPPINGS = {
+ "SD_4XUpscale_Conditioning": SD_4XUpscale_Conditioning,
+}
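
Size bookkeeping in SD_4XUpscale_Conditioning above: the concat image and the output latent are both a quarter of the target resolution, which the 4x upscale model then decodes back to full size. A sketch with an assumed 128x128 input:

```python
scale_ratio = 4.0
h_in, w_in = 128, 128                       # images.shape[-3], images.shape[-2]
width = max(1, round(w_in * scale_ratio))   # 512
height = max(1, round(h_in * scale_ratio))  # 512

# Both the concat pixels and the [B, 4, H, W] latent use the quarter size.
print(width // 4, height // 4)              # 128 128
```
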
diff --git a/ldm_patched/contrib/external_stable3d.py b/ldm_patched/contrib/external_stable3d.py
index 2913a3d..bae2623 100644
--- a/ldm_patched/contrib/external_stable3d.py
+++ b/ldm_patched/contrib/external_stable3d.py
@@ -48,13 +48,57 @@ class StableZero123_Conditioning:
encode_pixels = pixels[:,:,:,:3]
t = vae.encode(encode_pixels)
cam_embeds = camera_embeddings(elevation, azimuth)
- cond = torch.cat([pooled, cam_embeds.repeat((pooled.shape[0], 1, 1))], dim=-1)
+ cond = torch.cat([pooled, cam_embeds.to(pooled.device).repeat((pooled.shape[0], 1, 1))], dim=-1)
positive = [[cond, {"concat_latent_image": t}]]
negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t)}]]
latent = torch.zeros([batch_size, 4, height // 8, width // 8])
return (positive, negative, {"samples":latent})
+class StableZero123_Conditioning_Batched:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "clip_vision": ("CLIP_VISION",),
+ "init_image": ("IMAGE",),
+ "vae": ("VAE",),
+ "width": ("INT", {"default": 256, "min": 16, "max": ldm_patched.contrib.external.MAX_RESOLUTION, "step": 8}),
+ "height": ("INT", {"default": 256, "min": 16, "max": ldm_patched.contrib.external.MAX_RESOLUTION, "step": 8}),
+ "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
+ "elevation": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}),
+ "azimuth": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}),
+ "elevation_batch_increment": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}),
+ "azimuth_batch_increment": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}),
+ }}
+ RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
+ RETURN_NAMES = ("positive", "negative", "latent")
+
+ FUNCTION = "encode"
+
+ CATEGORY = "conditioning/3d_models"
+
+ def encode(self, clip_vision, init_image, vae, width, height, batch_size, elevation, azimuth, elevation_batch_increment, azimuth_batch_increment):
+ output = clip_vision.encode_image(init_image)
+ pooled = output.image_embeds.unsqueeze(0)
+ pixels = ldm_patched.modules.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1)
+ encode_pixels = pixels[:,:,:,:3]
+ t = vae.encode(encode_pixels)
+
+ cam_embeds = []
+ for i in range(batch_size):
+ cam_embeds.append(camera_embeddings(elevation, azimuth))
+ elevation += elevation_batch_increment
+ azimuth += azimuth_batch_increment
+
+ cam_embeds = torch.cat(cam_embeds, dim=0)
+ cond = torch.cat([ldm_patched.modules.utils.repeat_to_batch_size(pooled, batch_size), cam_embeds], dim=-1)
+
+ positive = [[cond, {"concat_latent_image": t}]]
+ negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t)}]]
+ latent = torch.zeros([batch_size, 4, height // 8, width // 8])
+ return (positive, negative, {"samples":latent, "batch_index": [0] * batch_size})
+
+
NODE_CLASS_MAPPINGS = {
"StableZero123_Conditioning": StableZero123_Conditioning,
+ "StableZero123_Conditioning_Batched": StableZero123_Conditioning_Batched,
}
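
StableZero123_Conditioning_Batched above sweeps the camera per frame while "batch_index": [0] * batch_size makes every frame start from the same noise, so only the conditioning varies. A sketch of the angle bookkeeping alone (camera_embeddings itself lives in the surrounding module), with assumed increments:

```python
batch_size = 4
elevation, azimuth = 0.0, 0.0
elevation_batch_increment, azimuth_batch_increment = 0.0, 90.0  # assumed sweep

angles = []
for _ in range(batch_size):
    angles.append((elevation, azimuth))
    elevation += elevation_batch_increment
    azimuth += azimuth_batch_increment

print(angles)  # [(0.0, 0.0), (0.0, 90.0), (0.0, 180.0), (0.0, 270.0)]
```
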
diff --git a/ldm_patched/contrib/external_video_model.py b/ldm_patched/contrib/external_video_model.py
index 4504528..503df0e 100644
--- a/ldm_patched/contrib/external_video_model.py
+++ b/ldm_patched/contrib/external_video_model.py
@@ -5,6 +5,7 @@ import torch
import ldm_patched.modules.utils
import ldm_patched.modules.sd
import ldm_patched.utils.path_utils
+import ldm_patched.contrib.external_model_merging
class ImageOnlyCheckpointLoader:
@@ -80,10 +81,26 @@ class VideoLinearCFGGuidance:
m.set_model_sampler_cfg_function(linear_cfg)
return (m, )
+class ImageOnlyCheckpointSave(ldm_patched.contrib.external_model_merging.CheckpointSave):
+ CATEGORY = "_for_testing"
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "model": ("MODEL",),
+ "clip_vision": ("CLIP_VISION",),
+ "vae": ("VAE",),
+ "filename_prefix": ("STRING", {"default": "checkpoints/ldm_patched"}),},
+ "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},}
+
+ def save(self, model, clip_vision, vae, filename_prefix, prompt=None, extra_pnginfo=None):
+ ldm_patched.contrib.external_model_merging.save_checkpoint(model, clip_vision=clip_vision, vae=vae, filename_prefix=filename_prefix, output_dir=self.output_dir, prompt=prompt, extra_pnginfo=extra_pnginfo)
+ return {}
+
NODE_CLASS_MAPPINGS = {
"ImageOnlyCheckpointLoader": ImageOnlyCheckpointLoader,
"SVD_img2vid_Conditioning": SVD_img2vid_Conditioning,
"VideoLinearCFGGuidance": VideoLinearCFGGuidance,
+ "ImageOnlyCheckpointSave": ImageOnlyCheckpointSave,
}
NODE_DISPLAY_NAME_MAPPINGS = {
diff --git a/ldm_patched/ldm/modules/attention.py b/ldm_patched/ldm/modules/attention.py
index 49e502e..e10a868 100644
--- a/ldm_patched/ldm/modules/attention.py
+++ b/ldm_patched/ldm/modules/attention.py
@@ -1,12 +1,9 @@
-from inspect import isfunction
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from typing import Optional, Any
-from functools import partial
-
from .diffusionmodules.util import checkpoint, AlphaBlender, timestep_embedding
from .sub_quadratic_attention import efficient_dot_product_attention
@@ -177,6 +174,7 @@ def attention_sub_quad(query, key, value, heads, mask=None):
kv_chunk_size_min=kv_chunk_size_min,
use_checkpoint=False,
upcast_attention=upcast_attention,
+ mask=mask,
)
hidden_states = hidden_states.to(dtype)
@@ -239,6 +237,12 @@ def attention_split(q, k, v, heads, mask=None):
else:
s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * scale
+ if mask is not None:
+ if len(mask.shape) == 2:
+ s1 += mask[i:end]
+ else:
+ s1 += mask[:, i:end]
+
s2 = s1.softmax(dim=-1).to(v.dtype)
del s1
first_op_done = True
@@ -294,11 +298,14 @@ def attention_xformers(q, k, v, heads, mask=None):
(q, k, v),
)
- # actually compute the attention, what we cannot get enough of
- out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
+ if mask is not None:
+ pad = 8 - q.shape[1] % 8
+ mask_out = torch.empty([q.shape[0], q.shape[1], q.shape[1] + pad], dtype=q.dtype, device=q.device)
+ mask_out[:, :, :mask.shape[-1]] = mask
+ mask = mask_out[:, :, :mask.shape[-1]]
+
+ out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=mask)
- if exists(mask):
- raise NotImplementedError
out = (
out.unsqueeze(0)
.reshape(b, heads, -1, dim_head)
@@ -323,7 +330,6 @@ def attention_pytorch(q, k, v, heads, mask=None):
optimized_attention = attention_basic
-optimized_attention_masked = attention_basic
if model_management.xformers_enabled():
print("Using xformers cross attention")
@@ -339,15 +345,18 @@ else:
print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --attention-split")
optimized_attention = attention_sub_quad
-if model_management.pytorch_attention_enabled():
- optimized_attention_masked = attention_pytorch
+optimized_attention_masked = optimized_attention
-def optimized_attention_for_device(device, mask=False):
- if device == torch.device("cpu"): #TODO
+def optimized_attention_for_device(device, mask=False, small_input=False):
+ if small_input:
if model_management.pytorch_attention_enabled():
- return attention_pytorch
+ return attention_pytorch #TODO: need to confirm but this is probably slightly faster for small inputs in all cases
else:
return attention_basic
+
+ if device == torch.device("cpu"):
+ return attention_sub_quad
+
if mask:
return optimized_attention_masked
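
In the xformers hunk above, the attention bias is written into a buffer whose last dimension is padded to a multiple of 8 and then viewed back to the true size, so the underlying storage keeps the alignment xformers expects. A standalone sketch with an assumed 77-token sequence:

```python
import torch

q_tokens = 77                                  # assumed sequence length
q = torch.empty(2, q_tokens, 64)
mask = torch.zeros(2, q_tokens, q_tokens)

pad = 8 - q.shape[1] % 8                       # 3 for 77 tokens
mask_out = torch.empty(q.shape[0], q.shape[1], q.shape[1] + pad,
                       dtype=q.dtype, device=q.device)
mask_out[:, :, :mask.shape[-1]] = mask
mask = mask_out[:, :, :mask.shape[-1]]         # view: [2, 77, 77] over padded rows

print(mask.shape, mask.stride())  # torch.Size([2, 77, 77]) (6160, 80, 1)
```
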
diff --git a/ldm_patched/ldm/modules/diffusionmodules/openaimodel.py b/ldm_patched/ldm/modules/diffusionmodules/openaimodel.py
index e5784f2..4b695f7 100644
--- a/ldm_patched/ldm/modules/diffusionmodules/openaimodel.py
+++ b/ldm_patched/ldm/modules/diffusionmodules/openaimodel.py
@@ -1,12 +1,9 @@
from abc import abstractmethod
-import math
-import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
-from functools import partial
from .util import (
checkpoint,
@@ -437,9 +434,6 @@ class UNetModel(nn.Module):
operations=ops,
):
super().__init__()
- assert use_spatial_transformer == True, "use_spatial_transformer has to be true"
- if use_spatial_transformer:
- assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
if context_dim is not None:
assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
@@ -456,7 +450,6 @@ class UNetModel(nn.Module):
if num_head_channels == -1:
assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
- self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
@@ -502,7 +495,7 @@ class UNetModel(nn.Module):
if self.num_classes is not None:
if isinstance(self.num_classes, int):
- self.label_emb = nn.Embedding(num_classes, time_embed_dim)
+ self.label_emb = nn.Embedding(num_classes, time_embed_dim, dtype=self.dtype, device=device)
elif self.num_classes == "continuous":
print("setting up linear c_adm embedding layer")
self.label_emb = nn.Linear(1, time_embed_dim)
diff --git a/ldm_patched/ldm/modules/diffusionmodules/upscaling.py b/ldm_patched/ldm/modules/diffusionmodules/upscaling.py
index 2cde80c..a38bff5 100644
--- a/ldm_patched/ldm/modules/diffusionmodules/upscaling.py
+++ b/ldm_patched/ldm/modules/diffusionmodules/upscaling.py
@@ -41,8 +41,12 @@ class AbstractLowScaleModel(nn.Module):
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
- def q_sample(self, x_start, t, noise=None):
- noise = default(noise, lambda: torch.randn_like(x_start))
+ def q_sample(self, x_start, t, noise=None, seed=None):
+ if noise is None:
+ if seed is None:
+ noise = torch.randn_like(x_start)
+ else:
+ noise = torch.randn(x_start.size(), dtype=x_start.dtype, layout=x_start.layout, generator=torch.manual_seed(seed)).to(x_start.device)
return (extract_into_tensor(self.sqrt_alphas_cumprod.to(x_start.device), t, x_start.shape) * x_start +
extract_into_tensor(self.sqrt_one_minus_alphas_cumprod.to(x_start.device), t, x_start.shape) * noise)
@@ -69,12 +73,12 @@ class ImageConcatWithNoiseAugmentation(AbstractLowScaleModel):
super().__init__(noise_schedule_config=noise_schedule_config)
self.max_noise_level = max_noise_level
- def forward(self, x, noise_level=None):
+ def forward(self, x, noise_level=None, seed=None):
if noise_level is None:
noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long()
else:
assert isinstance(noise_level, torch.Tensor)
- z = self.q_sample(x, noise_level)
+ z = self.q_sample(x, noise_level, seed=seed)
return z, noise_level
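
The seed parameter threaded through q_sample above draws noise from a generator primed with torch.manual_seed, making noise augmentation reproducible across runs. A minimal sketch, assuming a dummy x_start:

```python
import torch

x_start = torch.zeros(1, 4, 8, 8)  # assumed latent-shaped input
seed = 42

def seeded_noise(x_start, seed):
    # Mirrors the seeded branch added to q_sample.
    return torch.randn(x_start.size(), dtype=x_start.dtype, layout=x_start.layout,
                       generator=torch.manual_seed(seed)).to(x_start.device)

assert torch.equal(seeded_noise(x_start, seed), seeded_noise(x_start, seed))
```
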
diff --git a/ldm_patched/ldm/modules/encoders/noise_aug_modules.py b/ldm_patched/ldm/modules/encoders/noise_aug_modules.py
index 66767b5..a5d8660 100644
--- a/ldm_patched/ldm/modules/encoders/noise_aug_modules.py
+++ b/ldm_patched/ldm/modules/encoders/noise_aug_modules.py
@@ -23,13 +23,13 @@ class CLIPEmbeddingNoiseAugmentation(ImageConcatWithNoiseAugmentation):
x = (x * self.data_std.to(x.device)) + self.data_mean.to(x.device)
return x
- def forward(self, x, noise_level=None):
+ def forward(self, x, noise_level=None, seed=None):
if noise_level is None:
noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long()
else:
assert isinstance(noise_level, torch.Tensor)
x = self.scale(x)
- z = self.q_sample(x, noise_level)
+ z = self.q_sample(x, noise_level, seed=seed)
z = self.unscale(z)
noise_level = self.time_embed(noise_level)
return z, noise_level
diff --git a/ldm_patched/ldm/modules/sub_quadratic_attention.py b/ldm_patched/ldm/modules/sub_quadratic_attention.py
index cabf1f6..9f4c23c 100644
--- a/ldm_patched/ldm/modules/sub_quadratic_attention.py
+++ b/ldm_patched/ldm/modules/sub_quadratic_attention.py
@@ -61,6 +61,7 @@ def _summarize_chunk(
value: Tensor,
scale: float,
upcast_attention: bool,
+ mask,
) -> AttnChunk:
if upcast_attention:
with torch.autocast(enabled=False, device_type = 'cuda'):
@@ -84,6 +85,8 @@ def _summarize_chunk(
max_score, _ = torch.max(attn_weights, -1, keepdim=True)
max_score = max_score.detach()
attn_weights -= max_score
+ if mask is not None:
+ attn_weights += mask
torch.exp(attn_weights, out=attn_weights)
exp_weights = attn_weights.to(value.dtype)
exp_values = torch.bmm(exp_weights, value)
@@ -96,11 +99,12 @@ def _query_chunk_attention(
value: Tensor,
summarize_chunk: SummarizeChunk,
kv_chunk_size: int,
+ mask,
) -> Tensor:
batch_x_heads, k_channels_per_head, k_tokens = key_t.shape
_, _, v_channels_per_head = value.shape
- def chunk_scanner(chunk_idx: int) -> AttnChunk:
+ def chunk_scanner(chunk_idx: int, mask) -> AttnChunk:
key_chunk = dynamic_slice(
key_t,
(0, 0, chunk_idx),
@@ -111,10 +115,13 @@ def _query_chunk_attention(
(0, chunk_idx, 0),
(batch_x_heads, kv_chunk_size, v_channels_per_head)
)
- return summarize_chunk(query, key_chunk, value_chunk)
+ if mask is not None:
+ mask = mask[:,:,chunk_idx:chunk_idx + kv_chunk_size]
+
+ return summarize_chunk(query, key_chunk, value_chunk, mask=mask)
chunks: List[AttnChunk] = [
- chunk_scanner(chunk) for chunk in torch.arange(0, k_tokens, kv_chunk_size)
+ chunk_scanner(chunk, mask) for chunk in torch.arange(0, k_tokens, kv_chunk_size)
]
acc_chunk = AttnChunk(*map(torch.stack, zip(*chunks)))
chunk_values, chunk_weights, chunk_max = acc_chunk
@@ -135,6 +142,7 @@ def _get_attention_scores_no_kv_chunking(
value: Tensor,
scale: float,
upcast_attention: bool,
+ mask,
) -> Tensor:
if upcast_attention:
with torch.autocast(enabled=False, device_type = 'cuda'):
@@ -156,6 +164,8 @@ def _get_attention_scores_no_kv_chunking(
beta=0,
)
+ if mask is not None:
+ attn_scores += mask
try:
attn_probs = attn_scores.softmax(dim=-1)
del attn_scores
@@ -183,6 +193,7 @@ def efficient_dot_product_attention(
kv_chunk_size_min: Optional[int] = None,
use_checkpoint=True,
upcast_attention=False,
+ mask = None,
):
"""Computes efficient dot-product attention given query, transposed key, and value.
This is efficient version of attention presented in
@@ -209,13 +220,22 @@ def efficient_dot_product_attention(
if kv_chunk_size_min is not None:
kv_chunk_size = max(kv_chunk_size, kv_chunk_size_min)
+ if mask is not None and len(mask.shape) == 2:
+ mask = mask.unsqueeze(0)
+
def get_query_chunk(chunk_idx: int) -> Tensor:
return dynamic_slice(
query,
(0, chunk_idx, 0),
(batch_x_heads, min(query_chunk_size, q_tokens), q_channels_per_head)
)
-
+
+ def get_mask_chunk(chunk_idx: int) -> Tensor:
+ if mask is None:
+ return None
+ chunk = min(query_chunk_size, q_tokens)
+ return mask[:,chunk_idx:chunk_idx + chunk]
+
summarize_chunk: SummarizeChunk = partial(_summarize_chunk, scale=scale, upcast_attention=upcast_attention)
summarize_chunk: SummarizeChunk = partial(checkpoint, summarize_chunk) if use_checkpoint else summarize_chunk
compute_query_chunk_attn: ComputeQueryChunkAttn = partial(
@@ -237,6 +257,7 @@ def efficient_dot_product_attention(
query=query,
key_t=key_t,
value=value,
+ mask=mask,
)
# TODO: maybe we should use torch.empty_like(query) to allocate storage in-advance,
@@ -246,6 +267,7 @@ def efficient_dot_product_attention(
query=get_query_chunk(i * query_chunk_size),
key_t=key_t,
value=value,
+ mask=get_mask_chunk(i * query_chunk_size)
) for i in range(math.ceil(q_tokens / query_chunk_size))
], dim=1)
return res
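
The mask plumbing added to sub_quadratic_attention slices the bias twice: get_mask_chunk takes a block of query rows, then chunk_scanner takes the matching key/value columns, so each chunk sees exactly its window of the full bias. A standalone sketch with assumed sizes:

```python
import torch

q_tokens, k_tokens = 10, 12
query_chunk_size, kv_chunk_size = 4, 6         # assumed chunk sizes
mask = torch.zeros(1, q_tokens, k_tokens)      # full attention bias

for qi in range(0, q_tokens, query_chunk_size):
    q_rows = mask[:, qi:qi + min(query_chunk_size, q_tokens)]   # get_mask_chunk
    for ki in range(0, k_tokens, kv_chunk_size):
        window = q_rows[:, :, ki:ki + kv_chunk_size]            # chunk_scanner slice
        print(qi, ki, tuple(window.shape))  # e.g. (1, 4, 6) ... (1, 2, 6)
```
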
diff --git a/ldm_patched/licenses-3rd/chainer b/ldm_patched/licenses-3rd/chainer
new file mode 100644
index 0000000..db8ef9d
--- /dev/null
+++ b/ldm_patched/licenses-3rd/chainer
@@ -0,0 +1,20 @@
+Copyright (c) 2015 Preferred Infrastructure, Inc.
+Copyright (c) 2015 Preferred Networks, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/ldm_patched/licenses-3rd/comfyui b/ldm_patched/licenses-3rd/comfyui
new file mode 100644
index 0000000..e72bfdd
--- /dev/null
+++ b/ldm_patched/licenses-3rd/comfyui
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
\ No newline at end of file
diff --git a/ldm_patched/licenses-3rd/diffusers b/ldm_patched/licenses-3rd/diffusers
new file mode 100644
index 0000000..f49a4e1
--- /dev/null
+++ b/ldm_patched/licenses-3rd/diffusers
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/ldm_patched/licenses-3rd/kdiffusion b/ldm_patched/licenses-3rd/kdiffusion
new file mode 100644
index 0000000..e20684e
--- /dev/null
+++ b/ldm_patched/licenses-3rd/kdiffusion
@@ -0,0 +1,19 @@
+Copyright (c) 2022 Katherine Crowson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/ldm_patched/licenses-3rd/ldm b/ldm_patched/licenses-3rd/ldm
new file mode 100644
index 0000000..1a1c505
--- /dev/null
+++ b/ldm_patched/licenses-3rd/ldm
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/ldm_patched/licenses-3rd/taesd b/ldm_patched/licenses-3rd/taesd
new file mode 100644
index 0000000..62e6312
--- /dev/null
+++ b/ldm_patched/licenses-3rd/taesd
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Ollin Boer Bohan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/ldm_patched/licenses-3rd/transformers b/ldm_patched/licenses-3rd/transformers
new file mode 100644
index 0000000..e44d8f5
--- /dev/null
+++ b/ldm_patched/licenses-3rd/transformers
@@ -0,0 +1,203 @@
+Copyright 2018- The Hugging Face team. All rights reserved.
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/ldm_patched/modules/args_parser.py b/ldm_patched/modules/args_parser.py
index 7ffc4a8..e5b84dc 100644
--- a/ldm_patched/modules/args_parser.py
+++ b/ldm_patched/modules/args_parser.py
@@ -112,6 +112,8 @@ parser.add_argument("--is-windows-embedded-python", action="store_true")
parser.add_argument("--disable-server-info", action="store_true")
+parser.add_argument("--multi-user", action="store_true")
+
if ldm_patched.modules.options.args_parsing:
    args = parser.parse_args()
else:
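
For context, the toggle in the surrounding lines lets this module be imported with argument parsing disabled: the real argv is parsed only when args_parsing is set, and an empty argv otherwise, so every flag (including the new --multi-user) keeps its default. A condensed sketch of that inferred behaviour, with a local `args_parsing` standing in for ldm_patched.modules.options.args_parsing:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--multi-user", action="store_true")

    args_parsing = False  # stands in for ldm_patched.modules.options.args_parsing
    args = parser.parse_args() if args_parsing else parser.parse_args([])
    print(args.multi_user)  # False: parsing [] leaves every flag at its default
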
diff --git a/ldm_patched/modules/clip_model.py b/ldm_patched/modules/clip_model.py
index 4c4588c..aceca86 100644
--- a/ldm_patched/modules/clip_model.py
+++ b/ldm_patched/modules/clip_model.py
@@ -57,7 +57,7 @@ class CLIPEncoder(torch.nn.Module):
self.layers = torch.nn.ModuleList([CLIPLayer(embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations) for i in range(num_layers)])
def forward(self, x, mask=None, intermediate_output=None):
- optimized_attention = optimized_attention_for_device(x.device, mask=mask is not None)
+ optimized_attention = optimized_attention_for_device(x.device, mask=mask is not None, small_input=True)
if intermediate_output is not None:
if intermediate_output < 0:
diff --git a/ldm_patched/modules/clip_vision.py b/ldm_patched/modules/clip_vision.py
index 9699210..affdb8b 100644
--- a/ldm_patched/modules/clip_vision.py
+++ b/ldm_patched/modules/clip_vision.py
@@ -1,7 +1,6 @@
-from .utils import load_torch_file, transformers_convert, common_upscale
+from .utils import load_torch_file, transformers_convert, state_dict_prefix_replace
import os
import torch
-import contextlib
import json
import ldm_patched.modules.ops
@@ -41,9 +40,13 @@ class ClipVisionModel():
self.model.eval()
self.patcher = ldm_patched.modules.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device)
+
def load_sd(self, sd):
return self.model.load_state_dict(sd, strict=False)
+ def get_sd(self):
+ return self.model.state_dict()
+
def encode_image(self, image):
ldm_patched.modules.model_management.load_model_gpu(self.patcher)
pixel_values = clip_preprocess(image.to(self.load_device)).float()
@@ -76,6 +79,9 @@ def convert_to_transformers(sd, prefix):
sd['visual_projection.weight'] = sd.pop("{}proj".format(prefix)).transpose(0, 1)
sd = transformers_convert(sd, prefix, "vision_model.", 48)
+ else:
+ replace_prefix = {prefix: ""}
+ sd = state_dict_prefix_replace(sd, replace_prefix)
return sd
def load_clipvision_from_sd(sd, prefix="", convert_keys=False):
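
The new else-branch above leans on state_dict_prefix_replace to strip the checkpoint's key prefix instead of running the full transformers key conversion. A minimal sketch of the behaviour this relies on (assumed to mirror ldm_patched.modules.utils.state_dict_prefix_replace; the example key is invented):

    def state_dict_prefix_replace(state_dict, replace_prefix):
        # Rewrite every key that starts with an old prefix to use the new one.
        for old_prefix, new_prefix in replace_prefix.items():
            for k in [k for k in state_dict if k.startswith(old_prefix)]:
                state_dict[new_prefix + k[len(old_prefix):]] = state_dict.pop(k)
        return state_dict

    sd = {"clip_vision_model.vision_model.embeddings.class_embedding": 0.0}
    print(state_dict_prefix_replace(sd, {"clip_vision_model.": ""}))
    # {'vision_model.embeddings.class_embedding': 0.0}
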
diff --git a/ldm_patched/modules/conds.py b/ldm_patched/modules/conds.py
index a732568..ed03bd6 100644
--- a/ldm_patched/modules/conds.py
+++ b/ldm_patched/modules/conds.py
@@ -1,4 +1,3 @@
-import enum
import torch
import math
import ldm_patched.modules.utils
diff --git a/ldm_patched/modules/controlnet.py b/ldm_patched/modules/controlnet.py
index a722466..7e11497 100644
--- a/ldm_patched/modules/controlnet.py
+++ b/ldm_patched/modules/controlnet.py
@@ -1,7 +1,6 @@
import torch
import math
import os
-import contextlib
import ldm_patched.modules.utils
import ldm_patched.modules.model_management
import ldm_patched.modules.model_detection
@@ -126,7 +125,10 @@ class ControlBase:
if o[i] is None:
o[i] = prev_val
else:
- o[i] += prev_val
+ if o[i].shape[0] < prev_val.shape[0]:
+ o[i] = prev_val + o[i]
+ else:
+ o[i] += prev_val
return out
class ControlNet(ControlBase):
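
The control_merge change above swaps an in-place add for an out-of-place one when the accumulated output has a smaller batch than the previous value: in-place ops cannot grow a tensor, while a plain add broadcasts up to the larger shape. A quick illustration in bare PyTorch (shapes invented):

    import torch

    small = torch.zeros(1, 4)  # control output with batch size 1
    large = torch.ones(2, 4)   # prev_val already expanded to batch size 2

    # small += large           # RuntimeError: can't broadcast (2, 4) into (1, 4)
    merged = large + small     # fine: broadcasts out-of-place to shape (2, 4)
    print(merged.shape)        # torch.Size([2, 4])
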
diff --git a/ldm_patched/modules/diffusers_load.py b/ldm_patched/modules/diffusers_load.py
index 79fbbd5..62edc72 100644
--- a/ldm_patched/modules/diffusers_load.py
+++ b/ldm_patched/modules/diffusers_load.py
@@ -1,4 +1,3 @@
-import json
import os
import ldm_patched.modules.sd
diff --git a/ldm_patched/modules/gligen.py b/ldm_patched/modules/gligen.py
index 8dbd5fa..11f1ee9 100644
--- a/ldm_patched/modules/gligen.py
+++ b/ldm_patched/modules/gligen.py
@@ -1,5 +1,5 @@
import torch
-from torch import nn, einsum
+from torch import nn
from ldm_patched.ldm.modules.attention import CrossAttention
from inspect import isfunction
diff --git a/ldm_patched/modules/latent_formats.py b/ldm_patched/modules/latent_formats.py
index c209087..2252a07 100644
--- a/ldm_patched/modules/latent_formats.py
+++ b/ldm_patched/modules/latent_formats.py
@@ -33,3 +33,7 @@ class SDXL(LatentFormat):
[-0.3112, -0.2359, -0.2076]
]
self.taesd_decoder_name = "taesdxl_decoder"
+
+class SD_X4(LatentFormat):
+ def __init__(self):
+ self.scale_factor = 0.08333
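
SD_X4 only sets a scale factor; the scaling itself is applied by the LatentFormat base class. A sketch of how that factor is typically consumed, assuming the base class follows the usual process_in/process_out convention of this module:

    class LatentFormat:
        scale_factor = 1.0

        def process_in(self, latent):
            return latent * self.scale_factor   # encode path: scale into model space

        def process_out(self, latent):
            return latent / self.scale_factor   # decode path: undo the scaling

    class SD_X4(LatentFormat):
        def __init__(self):
            self.scale_factor = 0.08333
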
diff --git a/ldm_patched/modules/model_base.py b/ldm_patched/modules/model_base.py
index c04ccb3..9c69e98 100644
--- a/ldm_patched/modules/model_base.py
+++ b/ldm_patched/modules/model_base.py
@@ -1,12 +1,11 @@
import torch
-from ldm_patched.ldm.modules.diffusionmodules.openaimodel import UNetModel
+from ldm_patched.ldm.modules.diffusionmodules.openaimodel import UNetModel, Timestep
from ldm_patched.ldm.modules.encoders.noise_aug_modules import CLIPEmbeddingNoiseAugmentation
-from ldm_patched.ldm.modules.diffusionmodules.openaimodel import Timestep
+from ldm_patched.ldm.modules.diffusionmodules.upscaling import ImageConcatWithNoiseAugmentation
import ldm_patched.modules.model_management
import ldm_patched.modules.conds
import ldm_patched.modules.ops
from enum import Enum
-import contextlib
from . import utils
class ModelType(Enum):
@@ -78,8 +77,9 @@ class BaseModel(torch.nn.Module):
extra_conds = {}
for o in kwargs:
extra = kwargs[o]
- if hasattr(extra, "to"):
- extra = extra.to(dtype)
+ if hasattr(extra, "dtype"):
+ if extra.dtype != torch.int and extra.dtype != torch.long:
+ extra = extra.to(dtype)
extra_conds[o] = extra
model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float()
@@ -99,11 +99,29 @@ class BaseModel(torch.nn.Module):
if self.inpaint_model:
concat_keys = ("mask", "masked_image")
cond_concat = []
- denoise_mask = kwargs.get("denoise_mask", None)
- latent_image = kwargs.get("latent_image", None)
+ denoise_mask = kwargs.get("concat_mask", kwargs.get("denoise_mask", None))
+ concat_latent_image = kwargs.get("concat_latent_image", None)
+ if concat_latent_image is None:
+ concat_latent_image = kwargs.get("latent_image", None)
+ else:
+ concat_latent_image = self.process_latent_in(concat_latent_image)
+
noise = kwargs.get("noise", None)
device = kwargs["device"]
+ if concat_latent_image.shape[1:] != noise.shape[1:]:
+ concat_latent_image = utils.common_upscale(concat_latent_image, noise.shape[-1], noise.shape[-2], "bilinear", "center")
+
+ concat_latent_image = utils.resize_to_batch_size(concat_latent_image, noise.shape[0])
+
+ if len(denoise_mask.shape) == len(noise.shape):
+ denoise_mask = denoise_mask[:,:1]
+
+ denoise_mask = denoise_mask.reshape((-1, 1, denoise_mask.shape[-2], denoise_mask.shape[-1]))
+ if denoise_mask.shape[-2:] != noise.shape[-2:]:
+ denoise_mask = utils.common_upscale(denoise_mask, noise.shape[-1], noise.shape[-2], "bilinear", "center")
+ denoise_mask = utils.resize_to_batch_size(denoise_mask.round(), noise.shape[0])
+
def blank_inpaint_image_like(latent_image):
blank_image = torch.ones_like(latent_image)
# these are the values for "zero" in pixel space translated to latent space
@@ -116,9 +134,9 @@ class BaseModel(torch.nn.Module):
for ck in concat_keys:
if denoise_mask is not None:
if ck == "mask":
- cond_concat.append(denoise_mask[:,:1].to(device))
+ cond_concat.append(denoise_mask.to(device))
elif ck == "masked_image":
- cond_concat.append(latent_image.to(device)) #NOTE: the latent_image should be masked by the mask in pixel space
+ cond_concat.append(concat_latent_image.to(device)) #NOTE: the latent_image should be masked by the mask in pixel space
else:
if ck == "mask":
cond_concat.append(torch.ones_like(noise)[:,:1])
@@ -160,19 +178,28 @@ class BaseModel(torch.nn.Module):
def process_latent_out(self, latent):
return self.latent_format.process_out(latent)
- def state_dict_for_saving(self, clip_state_dict, vae_state_dict):
- clip_state_dict = self.model_config.process_clip_state_dict_for_saving(clip_state_dict)
+ def state_dict_for_saving(self, clip_state_dict=None, vae_state_dict=None, clip_vision_state_dict=None):
+ extra_sds = []
+ if clip_state_dict is not None:
+ extra_sds.append(self.model_config.process_clip_state_dict_for_saving(clip_state_dict))
+ if vae_state_dict is not None:
+ extra_sds.append(self.model_config.process_vae_state_dict_for_saving(vae_state_dict))
+ if clip_vision_state_dict is not None:
+ extra_sds.append(self.model_config.process_clip_vision_state_dict_for_saving(clip_vision_state_dict))
+
unet_state_dict = self.diffusion_model.state_dict()
unet_state_dict = self.model_config.process_unet_state_dict_for_saving(unet_state_dict)
- vae_state_dict = self.model_config.process_vae_state_dict_for_saving(vae_state_dict)
+
if self.get_dtype() == torch.float16:
- clip_state_dict = utils.convert_sd_to(clip_state_dict, torch.float16)
- vae_state_dict = utils.convert_sd_to(vae_state_dict, torch.float16)
+ extra_sds = map(lambda sd: utils.convert_sd_to(sd, torch.float16), extra_sds)
if self.model_type == ModelType.V_PREDICTION:
unet_state_dict["v_pred"] = torch.tensor([])
- return {**unet_state_dict, **vae_state_dict, **clip_state_dict}
+ for sd in extra_sds:
+ unet_state_dict.update(sd)
+
+ return unet_state_dict
def set_inpaint(self):
self.inpaint_model = True
@@ -191,7 +218,7 @@ class BaseModel(torch.nn.Module):
return (((area * 0.6) / 0.9) + 1024) * (1024 * 1024)
-def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge=0.0):
+def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge=0.0, seed=None):
adm_inputs = []
weights = []
noise_aug = []
@@ -200,7 +227,7 @@ def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge
weight = unclip_cond["strength"]
noise_augment = unclip_cond["noise_augmentation"]
noise_level = round((noise_augmentor.max_noise_level - 1) * noise_augment)
- c_adm, noise_level_emb = noise_augmentor(adm_cond.to(device), noise_level=torch.tensor([noise_level], device=device))
+ c_adm, noise_level_emb = noise_augmentor(adm_cond.to(device), noise_level=torch.tensor([noise_level], device=device), seed=seed)
adm_out = torch.cat((c_adm, noise_level_emb), 1) * weight
weights.append(weight)
noise_aug.append(noise_augment)
@@ -226,11 +253,11 @@ class SD21UNCLIP(BaseModel):
if unclip_conditioning is None:
return torch.zeros((1, self.adm_channels))
else:
- return unclip_adm(unclip_conditioning, device, self.noise_augmentor, kwargs.get("unclip_noise_augment_merge", 0.05))
+ return unclip_adm(unclip_conditioning, device, self.noise_augmentor, kwargs.get("unclip_noise_augment_merge", 0.05), kwargs.get("seed", 0) - 10)
def sdxl_pooled(args, noise_augmentor):
if "unclip_conditioning" in args:
- return unclip_adm(args.get("unclip_conditioning", None), args["device"], noise_augmentor)[:,:1280]
+ return unclip_adm(args.get("unclip_conditioning", None), args["device"], noise_augmentor, seed=args.get("seed", 0) - 10)[:,:1280]
else:
return args["pooled_output"]
@@ -364,3 +391,35 @@ class Stable_Zero123(BaseModel):
cross_attn = self.cc_projection(cross_attn)
out['c_crossattn'] = ldm_patched.modules.conds.CONDCrossAttn(cross_attn)
return out
+
+class SD_X4Upscaler(BaseModel):
+ def __init__(self, model_config, model_type=ModelType.V_PREDICTION, device=None):
+ super().__init__(model_config, model_type, device=device)
+ self.noise_augmentor = ImageConcatWithNoiseAugmentation(noise_schedule_config={"linear_start": 0.0001, "linear_end": 0.02}, max_noise_level=350)
+
+ def extra_conds(self, **kwargs):
+ out = {}
+
+ image = kwargs.get("concat_image", None)
+ noise = kwargs.get("noise", None)
+ noise_augment = kwargs.get("noise_augmentation", 0.0)
+ device = kwargs["device"]
+ seed = kwargs["seed"] - 10
+
+ noise_level = round((self.noise_augmentor.max_noise_level) * noise_augment)
+
+ if image is None:
+ image = torch.zeros_like(noise)[:,:3]
+
+ if image.shape[1:] != noise.shape[1:]:
+ image = utils.common_upscale(image.to(device), noise.shape[-1], noise.shape[-2], "bilinear", "center")
+
+ noise_level = torch.tensor([noise_level], device=device)
+ if noise_augment > 0:
+ image, noise_level = self.noise_augmentor(image.to(device), noise_level=noise_level, seed=seed)
+
+ image = utils.resize_to_batch_size(image, noise.shape[0])
+
+ out['c_concat'] = ldm_patched.modules.conds.CONDNoiseShape(image)
+ out['y'] = ldm_patched.modules.conds.CONDRegular(noise_level)
+ return out
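
The upscaler's conditioning boils down to one integer noise level derived from the 0..1 noise_augmentation knob. A worked example using the constant from the hunk above (the knob value is hypothetical):

    max_noise_level = 350                                  # from the noise_augmentor config
    noise_augment = 0.2                                    # user-facing value in [0, 1]
    noise_level = round(max_noise_level * noise_augment)   # -> 70
    # noise_level reaches the model through 'y'; when noise_augment > 0 the
    # low-res concat image is first noised to that level by the augmentor.
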
diff --git a/ldm_patched/modules/model_detection.py b/ldm_patched/modules/model_detection.py
index e8fc87a..126386c 100644
--- a/ldm_patched/modules/model_detection.py
+++ b/ldm_patched/modules/model_detection.py
@@ -34,7 +34,6 @@ def detect_unet_config(state_dict, key_prefix, dtype):
unet_config = {
"use_checkpoint": False,
"image_size": 32,
- "out_channels": 4,
"use_spatial_transformer": True,
"legacy": False
}
@@ -50,6 +49,12 @@ def detect_unet_config(state_dict, key_prefix, dtype):
model_channels = state_dict['{}input_blocks.0.0.weight'.format(key_prefix)].shape[0]
in_channels = state_dict['{}input_blocks.0.0.weight'.format(key_prefix)].shape[1]
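+ # read out_channels from the final output conv when present instead of hardcoding 4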
+ out_key = '{}out.2.weight'.format(key_prefix)
+ if out_key in state_dict:
+ out_channels = state_dict[out_key].shape[0]
+ else:
+ out_channels = 4
+
num_res_blocks = []
channel_mult = []
attention_resolutions = []
@@ -122,6 +127,7 @@ def detect_unet_config(state_dict, key_prefix, dtype):
transformer_depth_middle = -1
unet_config["in_channels"] = in_channels
+ unet_config["out_channels"] = out_channels
unet_config["model_channels"] = model_channels
unet_config["num_res_blocks"] = num_res_blocks
unet_config["transformer_depth"] = transformer_depth
diff --git a/ldm_patched/modules/model_management.py b/ldm_patched/modules/model_management.py
index 59f0f3d..6f88579 100644
--- a/ldm_patched/modules/model_management.py
+++ b/ldm_patched/modules/model_management.py
@@ -175,7 +175,7 @@ try:
if int(torch_version[0]) >= 2:
if ENABLE_PYTORCH_ATTENTION == False and args.attention_split == False and args.attention_quad == False:
ENABLE_PYTORCH_ATTENTION = True
- if torch.cuda.is_bf16_supported():
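+ # only use a bf16 VAE on compute capability >= 8 (Ampere and newer), where bf16 has native hardware support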
+ if torch.cuda.is_bf16_supported() and torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 8:
VAE_DTYPE = torch.bfloat16
if is_intel_xpu():
if args.attention_split == False and args.attention_quad == False:
diff --git a/ldm_patched/modules/model_patcher.py b/ldm_patched/modules/model_patcher.py
index 0945a13..dd816e5 100644
--- a/ldm_patched/modules/model_patcher.py
+++ b/ldm_patched/modules/model_patcher.py
@@ -174,40 +174,41 @@ class ModelPatcher:
sd.pop(k)
return sd
- def patch_model(self, device_to=None):
+ def patch_model(self, device_to=None, patch_weights=True):
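+ # patch_weights=False applies only the object patches and leaves model weights untouched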
for k in self.object_patches:
old = getattr(self.model, k)
if k not in self.object_patches_backup:
self.object_patches_backup[k] = old
setattr(self.model, k, self.object_patches[k])
- model_sd = self.model_state_dict()
- for key in self.patches:
- if key not in model_sd:
- print("could not patch. key doesn't exist in model:", key)
- continue
+ if patch_weights:
+ model_sd = self.model_state_dict()
+ for key in self.patches:
+ if key not in model_sd:
+ print("could not patch. key doesn't exist in model:", key)
+ continue
- weight = model_sd[key]
- inplace_update = self.weight_inplace_update
- if key not in self.backup:
- self.backup[key] = weight.to(device=self.offload_device, copy=inplace_update)
+ weight = model_sd[key]
+ inplace_update = self.weight_inplace_update
+ if key not in self.backup:
+ self.backup[key] = weight.to(device=self.offload_device, copy=inplace_update)
+
+ if device_to is not None:
+ temp_weight = ldm_patched.modules.model_management.cast_to_device(weight, device_to, torch.float32, copy=True)
+ else:
+ temp_weight = weight.to(torch.float32, copy=True)
+ out_weight = self.calculate_weight(self.patches[key], temp_weight, key).to(weight.dtype)
+ if inplace_update:
+ ldm_patched.modules.utils.copy_to_param(self.model, key, out_weight)
+ else:
+ ldm_patched.modules.utils.set_attr(self.model, key, out_weight)
+ del temp_weight
if device_to is not None:
- temp_weight = ldm_patched.modules.model_management.cast_to_device(weight, device_to, torch.float32, copy=True)
- else:
- temp_weight = weight.to(torch.float32, copy=True)
- out_weight = self.calculate_weight(self.patches[key], temp_weight, key).to(weight.dtype)
- if inplace_update:
- ldm_patched.modules.utils.copy_to_param(self.model, key, out_weight)
- else:
- ldm_patched.modules.utils.set_attr(self.model, key, out_weight)
- del temp_weight
-
- if device_to is not None:
- self.model.to(device_to)
- self.current_device = device_to
+ self.model.to(device_to)
+ self.current_device = device_to
return self.model
diff --git a/ldm_patched/modules/ops.py b/ldm_patched/modules/ops.py
index 435aba5..2d7fa37 100644
--- a/ldm_patched/modules/ops.py
+++ b/ldm_patched/modules/ops.py
@@ -1,5 +1,4 @@
import torch
-from contextlib import contextmanager
import ldm_patched.modules.model_management
def cast_bias_weight(s, input):
diff --git a/ldm_patched/modules/sample.py b/ldm_patched/modules/sample.py
index b5576ce..0f48395 100644
--- a/ldm_patched/modules/sample.py
+++ b/ldm_patched/modules/sample.py
@@ -28,7 +28,6 @@ def prepare_noise(latent_image, seed, noise_inds=None):
def prepare_mask(noise_mask, shape, device):
"""ensures noise mask is of proper dimensions"""
noise_mask = torch.nn.functional.interpolate(noise_mask.reshape((-1, 1, noise_mask.shape[-2], noise_mask.shape[-1])), size=(shape[2], shape[3]), mode="bilinear")
- noise_mask = noise_mask.round()
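+ # the mask is intentionally left unrounded so soft (non-binary) masks are preserved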
noise_mask = torch.cat([noise_mask] * shape[1], dim=1)
noise_mask = ldm_patched.modules.utils.repeat_to_batch_size(noise_mask, shape[0])
noise_mask = noise_mask.to(device)
diff --git a/ldm_patched/modules/samplers.py b/ldm_patched/modules/samplers.py
index fc17ef4..1f69d2b 100644
--- a/ldm_patched/modules/samplers.py
+++ b/ldm_patched/modules/samplers.py
@@ -1,13 +1,9 @@
from ldm_patched.k_diffusion import sampling as k_diffusion_sampling
from ldm_patched.unipc import uni_pc
import torch
-import enum
import collections
from ldm_patched.modules import model_management
import math
-from ldm_patched.modules import model_base
-import ldm_patched.modules.utils
-import ldm_patched.modules.conds
def get_area_and_mult(conds, x_in, timestep_in):
area = (x_in.shape[2], x_in.shape[3], 0, 0)
@@ -603,8 +599,8 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model
latent_image = model.process_latent_in(latent_image)
if hasattr(model, 'extra_conds'):
- positive = encode_model_conds(model.extra_conds, positive, noise, device, "positive", latent_image=latent_image, denoise_mask=denoise_mask)
- negative = encode_model_conds(model.extra_conds, negative, noise, device, "negative", latent_image=latent_image, denoise_mask=denoise_mask)
+ positive = encode_model_conds(model.extra_conds, positive, noise, device, "positive", latent_image=latent_image, denoise_mask=denoise_mask, seed=seed)
+ negative = encode_model_conds(model.extra_conds, negative, noise, device, "negative", latent_image=latent_image, denoise_mask=denoise_mask, seed=seed)
#make sure each cond area has an opposite one with the same area
for c in positive:
@@ -639,7 +635,7 @@ def calculate_sigmas_scheduler(model, scheduler_name, steps):
elif scheduler_name == "sgm_uniform":
sigmas = normal_scheduler(model, steps, sgm=True)
else:
- print("error invalid scheduler", self.scheduler)
+ print("error invalid scheduler", scheduler_name)
return sigmas
def sampler_object(name):
diff --git a/ldm_patched/modules/sd.py b/ldm_patched/modules/sd.py
index 3caa92d..e197c39 100644
--- a/ldm_patched/modules/sd.py
+++ b/ldm_patched/modules/sd.py
@@ -1,9 +1,6 @@
import torch
-import contextlib
-import math
from ldm_patched.modules import model_management
-from ldm_patched.ldm.util import instantiate_from_config
from ldm_patched.ldm.models.autoencoder import AutoencoderKL, AutoencodingEngine
import yaml
@@ -157,6 +154,8 @@ class VAE:
self.memory_used_encode = lambda shape, dtype: (1767 * shape[2] * shape[3]) * model_management.dtype_size(dtype) #These are for AutoencoderKL and need tweaking (should be lower)
self.memory_used_decode = lambda shape, dtype: (2178 * shape[2] * shape[3] * 64) * model_management.dtype_size(dtype)
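+ # defaults for standard SD VAEs; the x4 upscaler VAE overrides downscale_ratio below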
+ self.downscale_ratio = 8
+ self.latent_channels = 4
if config is None:
if "decoder.mid.block_1.mix_factor" in sd:
@@ -172,6 +171,11 @@ class VAE:
else:
#default SD1.x/SD2.x VAE parameters
ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
+
+ if 'encoder.down.2.downsample.conv.weight' not in sd: #Stable diffusion x4 upscaler VAE
+ ddconfig['ch_mult'] = [1, 2, 4]
+ self.downscale_ratio = 4
+
self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=4)
else:
self.first_stage_model = AutoencoderKL(**(config['params']))
@@ -204,9 +208,9 @@ class VAE:
decode_fn = lambda a: (self.first_stage_model.decode(a.to(self.vae_dtype).to(self.device)) + 1.0).float()
output = torch.clamp((
- (ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, output_device=self.output_device, pbar = pbar) +
- ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, output_device=self.output_device, pbar = pbar) +
- ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, output_device=self.output_device, pbar = pbar))
+ (ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = self.downscale_ratio, output_device=self.output_device, pbar = pbar) +
+ ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = self.downscale_ratio, output_device=self.output_device, pbar = pbar) +
+ ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = self.downscale_ratio, output_device=self.output_device, pbar = pbar))
/ 3.0) / 2.0, min=0.0, max=1.0)
return output
@@ -217,9 +221,9 @@ class VAE:
pbar = ldm_patched.modules.utils.ProgressBar(steps)
encode_fn = lambda a: self.first_stage_model.encode((2. * a - 1.).to(self.vae_dtype).to(self.device)).float()
- samples = ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, output_device=self.output_device, pbar=pbar)
- samples += ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, output_device=self.output_device, pbar=pbar)
- samples += ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, output_device=self.output_device, pbar=pbar)
+ samples = ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device, pbar=pbar)
+ samples += ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device, pbar=pbar)
+ samples += ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device, pbar=pbar)
samples /= 3.0
return samples
@@ -231,7 +235,7 @@ class VAE:
batch_number = int(free_memory / memory_used)
batch_number = max(1, batch_number)
- pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device=self.output_device)
+ pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * self.downscale_ratio), round(samples_in.shape[3] * self.downscale_ratio)), device=self.output_device)
for x in range(0, samples_in.shape[0], batch_number):
samples = samples_in[x:x+batch_number].to(self.vae_dtype).to(self.device)
pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples).to(self.output_device).float() + 1.0) / 2.0, min=0.0, max=1.0)
@@ -255,7 +259,7 @@ class VAE:
free_memory = model_management.get_free_memory(self.device)
batch_number = int(free_memory / memory_used)
batch_number = max(1, batch_number)
- samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device=self.output_device)
+ samples = torch.empty((pixel_samples.shape[0], self.latent_channels, round(pixel_samples.shape[2] // self.downscale_ratio), round(pixel_samples.shape[3] // self.downscale_ratio)), device=self.output_device)
for x in range(0, pixel_samples.shape[0], batch_number):
pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.vae_dtype).to(self.device)
samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).to(self.output_device).float()
@@ -527,7 +531,14 @@ def load_unet(unet_path):
raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path))
return model
-def save_checkpoint(output_path, model, clip, vae, metadata=None):
- model_management.load_models_gpu([model, clip.load_model()])
- sd = model.model.state_dict_for_saving(clip.get_sd(), vae.get_sd())
+def save_checkpoint(output_path, model, clip=None, vae=None, clip_vision=None, metadata=None):
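+ # clip, vae and clip_vision are optional; omitted components are left out of the saved checkpoint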
+ clip_sd = None
+ load_models = [model]
+ if clip is not None:
+ load_models.append(clip.load_model())
+ clip_sd = clip.get_sd()
+
+ model_management.load_models_gpu(load_models)
+ clip_vision_sd = clip_vision.get_sd() if clip_vision is not None else None
+ vae_sd = vae.get_sd() if vae is not None else None
+ sd = model.model.state_dict_for_saving(clip_sd, vae_sd, clip_vision_sd)
ldm_patched.modules.utils.save_torch_file(sd, output_path, metadata=metadata)
diff --git a/ldm_patched/modules/sd1_clip.py b/ldm_patched/modules/sd1_clip.py
index 736d616..3727fb4 100644
--- a/ldm_patched/modules/sd1_clip.py
+++ b/ldm_patched/modules/sd1_clip.py
@@ -6,7 +6,6 @@ import torch
import traceback
import zipfile
from . import model_management
-import contextlib
import ldm_patched.modules.clip_model
import json
diff --git a/ldm_patched/modules/supported_models.py b/ldm_patched/modules/supported_models.py
index 251bf6a..1d442d4 100644
--- a/ldm_patched/modules/supported_models.py
+++ b/ldm_patched/modules/supported_models.py
@@ -278,6 +278,33 @@ class Stable_Zero123(supported_models_base.BASE):
def clip_target(self):
return None
+class SD_X4Upscaler(SD20):
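+ # Stable Diffusion x4 upscaler: SD2.x backbone with 7 input channels (4 latent + 3 low-res image)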
+ unet_config = {
+ "context_dim": 1024,
+ "model_channels": 256,
+ 'in_channels': 7,
+ "use_linear_in_transformer": True,
+ "adm_in_channels": None,
+ "use_temporal_attention": False,
+ }
+
+ unet_extra_config = {
+ "disable_self_attentions": [True, True, True, False],
+ "num_classes": 1000,
+ "num_heads": 8,
+ "num_head_channels": -1,
+ }
+
+ latent_format = latent_formats.SD_X4
+
+ sampling_settings = {
+ "linear_start": 0.0001,
+ "linear_end": 0.02,
+ }
+
+ def get_model(self, state_dict, prefix="", device=None):
+ out = model_base.SD_X4Upscaler(self, device=device)
+ return out
+
-models = [Stable_Zero123, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B, Segmind_Vega]
+models = [Stable_Zero123, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B, Segmind_Vega, SD_X4Upscaler]
models += [SVD_img2vid]
diff --git a/ldm_patched/modules/supported_models_base.py b/ldm_patched/modules/supported_models_base.py
index 49087d2..5baf4bc 100644
--- a/ldm_patched/modules/supported_models_base.py
+++ b/ldm_patched/modules/supported_models_base.py
@@ -65,6 +65,12 @@ class BASE:
replace_prefix = {"": "cond_stage_model."}
return utils.state_dict_prefix_replace(state_dict, replace_prefix)
+ def process_clip_vision_state_dict_for_saving(self, state_dict):
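+ # re-apply this model's clip_vision key prefix so saved checkpoints keep the original layout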
+ replace_prefix = {}
+ if self.clip_vision_prefix is not None:
+ replace_prefix[""] = self.clip_vision_prefix
+ return utils.state_dict_prefix_replace(state_dict, replace_prefix)
+
def process_unet_state_dict_for_saving(self, state_dict):
replace_prefix = {"": "model.diffusion_model."}
return utils.state_dict_prefix_replace(state_dict, replace_prefix)
diff --git a/ldm_patched/utils/path_utils.py b/ldm_patched/utils/path_utils.py
index d21b648..6cae149 100644
--- a/ldm_patched/utils/path_utils.py
+++ b/ldm_patched/utils/path_utils.py
@@ -29,11 +29,14 @@ folder_names_and_paths["custom_nodes"] = ([os.path.join(base_path, "custom_nodes
folder_names_and_paths["hypernetworks"] = ([os.path.join(models_dir, "hypernetworks")], supported_pt_extensions)
+folder_names_and_paths["photomaker"] = ([os.path.join(models_dir, "photomaker")], supported_pt_extensions)
+
folder_names_and_paths["classifiers"] = ([os.path.join(models_dir, "classifiers")], {""})
output_directory = os.path.join(os.getcwd(), "output")
temp_directory = os.path.join(os.getcwd(), "temp")
input_directory = os.path.join(os.getcwd(), "input")
+user_directory = os.path.join(os.getcwd(), "user")
filename_list_cache = {}
@@ -137,15 +140,27 @@ def recursive_search(directory, excluded_dir_names=None):
excluded_dir_names = []
result = []
- dirs = {directory: os.path.getmtime(directory)}
+ dirs = {}
+
+ # Attempt to add the initial directory to dirs with error handling
+ try:
+ dirs[directory] = os.path.getmtime(directory)
+ except FileNotFoundError:
+ print(f"Warning: Unable to access {directory}. Skipping this path.")
+
for dirpath, subdirs, filenames in os.walk(directory, followlinks=True, topdown=True):
subdirs[:] = [d for d in subdirs if d not in excluded_dir_names]
for file_name in filenames:
relative_path = os.path.relpath(os.path.join(dirpath, file_name), directory)
result.append(relative_path)
+
for d in subdirs:
path = os.path.join(dirpath, d)
- dirs[path] = os.path.getmtime(path)
+ try:
+ dirs[path] = os.path.getmtime(path)
+ except FileNotFoundError:
+ print(f"Warning: Unable to access {path}. Skipping this path.")
+ continue
return result, dirs
def filter_files_extensions(files, extensions):