diff --git a/backend/headless/fcbh/model_sampling.py b/backend/headless/fcbh/model_sampling.py
index 0a3b19b..e1ea702 100644
--- a/backend/headless/fcbh/model_sampling.py
+++ b/backend/headless/fcbh/model_sampling.py
@@ -77,9 +77,9 @@ class ModelSamplingDiscrete(torch.nn.Module):
def percent_to_sigma(self, percent):
if percent <= 0.0:
- return torch.tensor(999999999.9)
+ return 999999999.9
if percent >= 1.0:
- return torch.tensor(0.0)
+ return 0.0
percent = 1.0 - percent
- return self.sigma(torch.tensor(percent * 999.0))
+ return self.sigma(torch.tensor(percent * 999.0)).item()
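Note: `percent_to_sigma` now returns plain Python floats instead of 0-dim tensors, so callers such as the `nodes_model_downscale.py` hunk below can drop their `.item()` calls. A minimal sketch of the new contract, with the real log-sigma interpolation stubbed out as an assumption:

```python
import torch

class ModelSamplingSketch:
    """Minimal sketch of the new percent_to_sigma contract (stubbed sigma)."""

    def sigma(self, timestep: torch.Tensor) -> torch.Tensor:
        # Stand-in for the real log-sigma interpolation; an assumption for illustration.
        return torch.exp(-4.0 + 8.0 * timestep / 999.0)

    def percent_to_sigma(self, percent: float) -> float:
        if percent <= 0.0:
            return 999999999.9   # effectively "start of sampling"
        if percent >= 1.0:
            return 0.0           # effectively "end of sampling"
        percent = 1.0 - percent
        # .item() converts the 0-dim tensor to a float, so every
        # branch now returns the same plain-float type.
        return self.sigma(torch.tensor(percent * 999.0)).item()

assert isinstance(ModelSamplingSketch().percent_to_sigma(0.5), float)
```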
diff --git a/backend/headless/fcbh_extras/nodes_images.py b/backend/headless/fcbh_extras/nodes_images.py
new file mode 100644
index 0000000..2b8e930
--- /dev/null
+++ b/backend/headless/fcbh_extras/nodes_images.py
@@ -0,0 +1,29 @@
+import nodes
+MAX_RESOLUTION = nodes.MAX_RESOLUTION
+
+class ImageCrop:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "image": ("IMAGE",),
+ "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
+ "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
+ "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
+ "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
+ }}
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "crop"
+
+ CATEGORY = "image/transform"
+
+ def crop(self, image, width, height, x, y):
+ x = min(x, image.shape[2] - 1)
+ y = min(y, image.shape[1] - 1)
+ to_x = width + x
+ to_y = height + y
+ img = image[:,y:to_y, x:to_x, :]
+ return (img,)
+
+
+NODE_CLASS_MAPPINGS = {
+ "ImageCrop": ImageCrop,
+}
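Note: `ImageCrop` works on the BHWC float tensors that fcbh passes between nodes. A hypothetical standalone use of the same slicing logic (tensor shape and values invented for illustration):

```python
import torch

# fcbh images are BHWC float tensors in [0, 1]; this mirrors ImageCrop.crop().
image = torch.rand(1, 768, 768, 3)
width, height, x, y = 512, 512, 100, 50

x = min(x, image.shape[2] - 1)   # clamp the crop origin inside the image
y = min(y, image.shape[1] - 1)
cropped = image[:, y:y + height, x:x + width, :]
print(cropped.shape)  # torch.Size([1, 512, 512, 3]); slicing clips at the edges
```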
diff --git a/backend/headless/fcbh_extras/nodes_model_advanced.py b/backend/headless/fcbh_extras/nodes_model_advanced.py
index 04b1136..5eedb78 100644
--- a/backend/headless/fcbh_extras/nodes_model_advanced.py
+++ b/backend/headless/fcbh_extras/nodes_model_advanced.py
@@ -67,11 +67,11 @@ class ModelSamplingDiscreteLCM(torch.nn.Module):
def percent_to_sigma(self, percent):
if percent <= 0.0:
- return torch.tensor(999999999.9)
+ return 999999999.9
if percent >= 1.0:
- return torch.tensor(0.0)
+ return 0.0
percent = 1.0 - percent
- return self.sigma(torch.tensor(percent * 999.0))
+ return self.sigma(torch.tensor(percent * 999.0)).item()
def rescale_zero_terminal_snr_sigmas(sigmas):
diff --git a/backend/headless/fcbh_extras/nodes_model_downscale.py b/backend/headless/fcbh_extras/nodes_model_downscale.py
index 8850d09..f65ef05 100644
--- a/backend/headless/fcbh_extras/nodes_model_downscale.py
+++ b/backend/headless/fcbh_extras/nodes_model_downscale.py
@@ -16,8 +16,8 @@ class PatchModelAddDownscale:
CATEGORY = "_for_testing"
def patch(self, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip):
- sigma_start = model.model.model_sampling.percent_to_sigma(start_percent).item()
- sigma_end = model.model.model_sampling.percent_to_sigma(end_percent).item()
+ sigma_start = model.model.model_sampling.percent_to_sigma(start_percent)
+ sigma_end = model.model.model_sampling.percent_to_sigma(end_percent)
def input_block_patch(h, transformer_options):
if transformer_options["block"][1] == block_number:
diff --git a/backend/headless/nodes.py b/backend/headless/nodes.py
index 2495932..4ab7a1a 100644
--- a/backend/headless/nodes.py
+++ b/backend/headless/nodes.py
@@ -1800,6 +1800,7 @@ def init_custom_nodes():
"nodes_hypertile.py",
"nodes_model_advanced.py",
"nodes_model_downscale.py",
+ "nodes_images.py",
]
for node_file in extras_files:
diff --git a/fooocus_version.py b/fooocus_version.py
index 6e56ff8..d36301c 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '2.1.821'
+version = '2.1.822'
diff --git a/modules/advanced_parameters.py b/modules/advanced_parameters.py
index 1547a87..ea04db6 100644
--- a/modules/advanced_parameters.py
+++ b/modules/advanced_parameters.py
@@ -2,9 +2,10 @@ disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adapt
scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \
overwrite_vary_strength, overwrite_upscale_strength, \
mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \
- debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, inpaint_engine, \
+ debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \
refiner_swap_method, \
- freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2 = [None] * 28
+ freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \
+ debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field = [None] * 32
def set_all_advanced_parameters(*args):
@@ -12,16 +13,18 @@ def set_all_advanced_parameters(*args):
scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \
overwrite_vary_strength, overwrite_upscale_strength, \
mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \
- debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, inpaint_engine, \
+ debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \
refiner_swap_method, \
- freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2
+ freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \
+ debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field
disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, \
scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \
overwrite_vary_strength, overwrite_upscale_strength, \
mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \
- debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, inpaint_engine, \
+ debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \
refiner_swap_method, \
- freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2 = args
+ freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \
+ debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field = args
return
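Note: this module keeps every advanced UI control in module-level globals that are positionally unpacked from `*args`, so the `[None] * 32` placeholder count must stay in sync with the `adps` list built in `webui.py`. A reduced sketch of the pattern:

```python
# Reduced sketch of the module-global pattern used above: the globals are
# declared by unpacking a placeholder list, then refreshed positionally.
a, b, c = [None] * 3

def set_all(*args):
    global a, b, c
    a, b, c = args  # raises ValueError if the UI control count drifts

set_all(1, 2, 3)
print(a, b, c)  # 1 2 3
```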
diff --git a/modules/async_worker.py b/modules/async_worker.py
index b420759..70f0070 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -130,13 +130,14 @@ def worker():
base_model_name = args.pop()
refiner_model_name = args.pop()
refiner_switch = args.pop()
- loras = [(args.pop(), args.pop()) for _ in range(5)]
+ loras = [[str(args.pop()), float(args.pop())] for _ in range(5)]
input_image_checkbox = args.pop()
current_tab = args.pop()
uov_method = args.pop()
uov_input_image = args.pop()
outpaint_selections = args.pop()
inpaint_input_image = args.pop()
+ inpaint_additional_prompt = args.pop()
cn_tasks = {x: [] for x in flags.ip_list}
for _ in range(4):
@@ -177,7 +178,7 @@ def worker():
if performance_selection == 'Extreme Speed':
print('Enter LCM mode.')
progressbar(async_task, 1, 'Downloading LCM components ...')
- base_model_additional_loras += [(modules.config.downloading_sdxl_lcm_lora(), 1.0)]
+ loras += [(modules.config.downloading_sdxl_lcm_lora(), 1.0)]
if refiner_model_name != 'None':
print(f'Refiner disabled in LCM mode.')
@@ -203,8 +204,10 @@ def worker():
modules.patch.positive_adm_scale = advanced_parameters.adm_scaler_positive
modules.patch.negative_adm_scale = advanced_parameters.adm_scaler_negative
modules.patch.adm_scaler_end = advanced_parameters.adm_scaler_end
- print(
- f'[Parameters] ADM Scale = {modules.patch.positive_adm_scale} : {modules.patch.negative_adm_scale} : {modules.patch.adm_scaler_end}')
+ print(f'[Parameters] ADM Scale = '
+ f'{modules.patch.positive_adm_scale} : '
+ f'{modules.patch.negative_adm_scale} : '
+ f'{modules.patch.adm_scaler_end}')
cfg_scale = float(guidance_scale)
print(f'[Parameters] CFG = {cfg_scale}')
@@ -212,7 +215,6 @@ def worker():
initial_latent = None
denoising_strength = 1.0
tiled = False
- inpaint_worker.current_task = None
width, height = aspect_ratios_selection.replace('×', ' ').split(' ')[:2]
width, height = int(width), int(height)
@@ -220,9 +222,14 @@ def worker():
skip_prompt_processing = False
refiner_swap_method = advanced_parameters.refiner_swap_method
+ inpaint_worker.current_task = None
+ inpaint_parameterized = advanced_parameters.inpaint_engine != 'None'
inpaint_image = None
inpaint_mask = None
inpaint_head_model_path = None
+
+ use_synthetic_refiner = False
+
controlnet_canny_path = None
controlnet_cpds_path = None
clip_vision_path, ip_negative_path, ip_adapter_path, ip_adapter_face_path = None, None, None, None
@@ -269,11 +276,24 @@ def worker():
inpaint_image = HWC3(inpaint_image)
if isinstance(inpaint_image, np.ndarray) and isinstance(inpaint_mask, np.ndarray) \
and (np.any(inpaint_mask > 127) or len(outpaint_selections) > 0):
- progressbar(async_task, 1, 'Downloading inpainter ...')
- inpaint_head_model_path, inpaint_patch_model_path = modules.config.downloading_inpaint_models(
- advanced_parameters.inpaint_engine)
- base_model_additional_loras += [(inpaint_patch_model_path, 1.0)]
- print(f'[Inpaint] Current inpaint model is {inpaint_patch_model_path}')
+ if inpaint_parameterized:
+ progressbar(async_task, 1, 'Downloading inpainter ...')
+ modules.config.downloading_upscale_model()
+ inpaint_head_model_path, inpaint_patch_model_path = modules.config.downloading_inpaint_models(
+ advanced_parameters.inpaint_engine)
+ base_model_additional_loras += [(inpaint_patch_model_path, 1.0)]
+ print(f'[Inpaint] Current inpaint model is {inpaint_patch_model_path}')
+ if refiner_model_name == 'None':
+ use_synthetic_refiner = True
+ refiner_switch = 0.5
+ else:
+ inpaint_head_model_path, inpaint_patch_model_path = None, None
+ print('[Inpaint] Parameterized inpaint is disabled.')
+ if inpaint_additional_prompt != '':
+ if prompt == '':
+ prompt = inpaint_additional_prompt
+ else:
+ prompt = inpaint_additional_prompt + '\n' + prompt
goals.append('inpaint')
if current_tab == 'ip' or \
advanced_parameters.mixing_image_prompt_and_inpaint or \
@@ -332,7 +352,8 @@ def worker():
progressbar(async_task, 3, 'Loading models ...')
pipeline.refresh_everything(refiner_model_name=refiner_model_name, base_model_name=base_model_name,
- loras=loras, base_model_additional_loras=base_model_additional_loras)
+ loras=loras, base_model_additional_loras=base_model_additional_loras,
+ use_synthetic_refiner=use_synthetic_refiner)
progressbar(async_task, 3, 'Processing prompts ...')
tasks = []
@@ -375,8 +396,8 @@ def worker():
uc=None,
positive_top_k=len(positive_basic_workloads),
negative_top_k=len(negative_basic_workloads),
- log_positive_prompt='\n'.join([task_prompt] + task_extra_positive_prompts),
- log_negative_prompt='\n'.join([task_negative_prompt] + task_extra_negative_prompts),
+ log_positive_prompt='; '.join([task_prompt] + task_extra_positive_prompts),
+ log_negative_prompt='; '.join([task_negative_prompt] + task_extra_negative_prompts),
))
if use_expansion:
@@ -421,7 +442,15 @@ def worker():
initial_pixels = core.numpy_to_pytorch(uov_input_image)
progressbar(async_task, 13, 'VAE encoding ...')
- initial_latent = core.encode_vae(vae=pipeline.final_vae, pixels=initial_pixels)
+
+ candidate_vae, _ = pipeline.get_candidate_vae(
+ steps=steps,
+ switch=switch,
+ denoise=denoising_strength,
+ refiner_swap_method=refiner_swap_method
+ )
+
+ initial_latent = core.encode_vae(vae=candidate_vae, pixels=initial_pixels)
B, C, H, W = initial_latent['samples'].shape
width = W * 8
height = H * 8
@@ -430,10 +459,7 @@ def worker():
if 'upscale' in goals:
H, W, C = uov_input_image.shape
progressbar(async_task, 13, f'Upscaling image from {str((H, W))} ...')
-
- uov_input_image = core.numpy_to_pytorch(uov_input_image)
uov_input_image = perform_upscale(uov_input_image)
- uov_input_image = core.pytorch_to_numpy(uov_input_image)[0]
print(f'Image upscaled.')
if '1.5x' in uov_method:
@@ -479,14 +505,20 @@ def worker():
initial_pixels = core.numpy_to_pytorch(uov_input_image)
progressbar(async_task, 13, 'VAE encoding ...')
+ candidate_vae, _ = pipeline.get_candidate_vae(
+ steps=steps,
+ switch=switch,
+ denoise=denoising_strength,
+ refiner_swap_method=refiner_swap_method
+ )
+
initial_latent = core.encode_vae(
- vae=pipeline.final_vae if pipeline.final_refiner_vae is None else pipeline.final_refiner_vae,
+ vae=candidate_vae,
pixels=initial_pixels, tiled=True)
B, C, H, W = initial_latent['samples'].shape
width = W * 8
height = H * 8
print(f'Final resolution is {str((height, width))}.')
- refiner_swap_method = 'upscale'
if 'inpaint' in goals:
if len(outpaint_selections) > 0:
@@ -512,13 +544,19 @@ def worker():
inpaint_image = np.ascontiguousarray(inpaint_image.copy())
inpaint_mask = np.ascontiguousarray(inpaint_mask.copy())
+ advanced_parameters.inpaint_strength = 1.0
+ advanced_parameters.inpaint_respective_field = 1.0
- inpaint_worker.current_task = inpaint_worker.InpaintWorker(image=inpaint_image, mask=inpaint_mask,
- is_outpaint=len(outpaint_selections) > 0)
+ denoising_strength = advanced_parameters.inpaint_strength
- pipeline.final_unet.model.diffusion_model.in_inpaint = True
+ inpaint_worker.current_task = inpaint_worker.InpaintWorker(
+ image=inpaint_image,
+ mask=inpaint_mask,
+ use_fill=denoising_strength > 0.99,
+ k=advanced_parameters.inpaint_respective_field
+ )
- if advanced_parameters.debugging_cn_preprocessor:
+ if advanced_parameters.debugging_inpaint_preprocessor:
yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing(),
do_not_show_finished_images=True)
return
@@ -529,33 +567,47 @@ def worker():
inpaint_pixel_image = core.numpy_to_pytorch(inpaint_worker.current_task.interested_image)
inpaint_pixel_mask = core.numpy_to_pytorch(inpaint_worker.current_task.interested_mask)
+ candidate_vae, candidate_vae_swap = pipeline.get_candidate_vae(
+ steps=steps,
+ switch=switch,
+ denoise=denoising_strength,
+ refiner_swap_method=refiner_swap_method
+ )
+
latent_inpaint, latent_mask = core.encode_vae_inpaint(
mask=inpaint_pixel_mask,
- vae=pipeline.final_vae,
+ vae=candidate_vae,
pixels=inpaint_pixel_image)
latent_swap = None
- if pipeline.final_refiner_vae is not None:
- progressbar(async_task, 13, 'VAE Inpaint SD15 encoding ...')
+ if candidate_vae_swap is not None:
+ progressbar(async_task, 13, 'VAE SD15 encoding ...')
latent_swap = core.encode_vae(
- vae=pipeline.final_refiner_vae,
+ vae=candidate_vae_swap,
pixels=inpaint_pixel_fill)['samples']
progressbar(async_task, 13, 'VAE encoding ...')
latent_fill = core.encode_vae(
- vae=pipeline.final_vae,
+ vae=candidate_vae,
pixels=inpaint_pixel_fill)['samples']
- inpaint_worker.current_task.load_latent(latent_fill=latent_fill,
- latent_inpaint=latent_inpaint,
- latent_mask=latent_mask,
- latent_swap=latent_swap,
- inpaint_head_model_path=inpaint_head_model_path)
+ inpaint_worker.current_task.load_latent(
+ latent_fill=latent_fill, latent_mask=latent_mask, latent_swap=latent_swap)
+
+ if inpaint_parameterized:
+ pipeline.final_unet = inpaint_worker.current_task.patch(
+ inpaint_head_model_path=inpaint_head_model_path,
+ inpaint_latent=latent_inpaint,
+ inpaint_latent_mask=latent_mask,
+ model=pipeline.final_unet
+ )
+
+ if not advanced_parameters.inpaint_disable_initial_latent:
+ initial_latent = {'samples': latent_fill}
B, C, H, W = latent_fill.shape
height, width = H * 8, W * 8
final_height, final_width = inpaint_worker.current_task.image.shape[:2]
- initial_latent = {'samples': latent_fill}
print(f'Final resolution is {str((final_height, final_width))}, latent is {str((height, width))}.')
if 'cn' in goals:
@@ -626,6 +678,15 @@ def worker():
all_steps = steps * image_number
+ print(f'[Parameters] Denoising Strength = {denoising_strength}')
+
+ if isinstance(initial_latent, dict) and 'samples' in initial_latent:
+ log_shape = initial_latent['samples'].shape
+ else:
+ log_shape = f'Image Space {(height, width)}'
+
+ print(f'[Parameters] Initial Latent shape: {log_shape}')
+
preparation_time = time.perf_counter() - execution_start_time
print(f'Preparation time: {preparation_time:.2f} seconds')
diff --git a/modules/config.py b/modules/config.py
index 43d7918..9cc6178 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -303,6 +303,15 @@ default_overwrite_switch = get_config_item_or_set_default(
default_value=-1,
validator=lambda x: isinstance(x, int)
)
+example_inpaint_prompts = get_config_item_or_set_default(
+ key='example_inpaint_prompts',
+ default_value=[
+ 'highly detailed face', 'detailed girl face', 'detailed man face', 'detailed hand', 'beautiful eyes'
+ ],
+ validator=lambda x: isinstance(x, list) and all(isinstance(v, str) for v in x)
+)
+
+example_inpaint_prompts = [[x] for x in example_inpaint_prompts]
config_dict["default_loras"] = default_loras = default_loras[:5] + [['None', 1.0] for _ in range(5 - len(default_loras))]
@@ -425,7 +434,7 @@ def downloading_sdxl_lcm_lora():
model_dir=path_loras,
file_name='sdxl_lcm_lora.safetensors'
)
- return os.path.join(path_loras, 'sdxl_lcm_lora.safetensors')
+ return 'sdxl_lcm_lora.safetensors'
def downloading_controlnet_canny():
diff --git a/modules/core.py b/modules/core.py
index 4586d7c..4d51a15 100644
--- a/modules/core.py
+++ b/modules/core.py
@@ -52,10 +52,12 @@ class StableDiffusionModel:
self.visited_loras = ''
self.lora_key_map = {}
- if self.unet is not None and self.clip is not None:
+ if self.unet is not None:
self.lora_key_map = model_lora_keys_unet(self.unet.model, self.lora_key_map)
- self.lora_key_map = model_lora_keys_clip(self.clip.cond_stage_model, self.lora_key_map)
self.lora_key_map.update({x: x for x in self.unet.model.state_dict().keys()})
+
+ if self.clip is not None:
+ self.lora_key_map = model_lora_keys_clip(self.clip.cond_stage_model, self.lora_key_map)
self.lora_key_map.update({x: x for x in self.clip.cond_stage_model.state_dict().keys()})
@torch.no_grad()
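Note: splitting the UNet and CLIP branches means a model with a UNet but no CLIP (e.g. a refiner loaded without a text encoder) still gets a usable LoRA key map instead of none at all. A reduced sketch of the guard pattern:

```python
import torch

def build_lora_key_map(unet, clip):
    """Reduced sketch: each submodel contributes keys only if it is present."""
    key_map = {}
    if unet is not None:   # UNet-only models (clip is None) still get a map
        key_map.update({k: k for k in unet.state_dict().keys()})
    if clip is not None:
        key_map.update({k: k for k in clip.state_dict().keys()})
    return key_map

unet_only = build_lora_key_map(torch.nn.Linear(4, 4), None)
print(len(unet_only))  # 2: weight and bias
```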
diff --git a/modules/default_pipeline.py b/modules/default_pipeline.py
index 3330c5b..7d2b74d 100644
--- a/modules/default_pipeline.py
+++ b/modules/default_pipeline.py
@@ -194,8 +194,10 @@ def prepare_text_encoder(async_call=True):
@torch.no_grad()
@torch.inference_mode()
-def refresh_everything(refiner_model_name, base_model_name, loras, base_model_additional_loras=None):
- global final_unet, final_clip, final_vae, final_refiner_unet, final_refiner_vae, final_expansion
+def refresh_everything(refiner_model_name, base_model_name, loras,
+ base_model_additional_loras=None, use_synthetic_refiner=False):
+ global final_unet, final_clip, final_vae, final_refiner_unet, final_refiner_vae, \
+ final_expansion, model_refiner, model_base
final_unet = None
final_clip = None
@@ -203,8 +205,23 @@ def refresh_everything(refiner_model_name, base_model_name, loras, base_model_ad
final_refiner_unet = None
final_refiner_vae = None
- refresh_refiner_model(refiner_model_name)
- refresh_base_model(base_model_name)
+ if use_synthetic_refiner and refiner_model_name == 'None':
+ print('Synthetic Refiner Activated')
+ refresh_base_model(base_model_name)
+ model_refiner = core.StableDiffusionModel(
+ unet=model_base.unet,
+ vae=model_base.vae,
+ clip=model_base.clip,
+ clip_vision=model_base.clip_vision,
+ filename=model_base.filename
+ )
+ model_refiner.vae = None
+ model_refiner.clip = None
+ model_refiner.clip_vision = None
+ else:
+ refresh_refiner_model(refiner_model_name)
+ refresh_base_model(base_model_name)
+
refresh_loras(loras, base_model_additional_loras=base_model_additional_loras)
assert_model_integrity()
@@ -212,14 +229,9 @@ def refresh_everything(refiner_model_name, base_model_name, loras, base_model_ad
final_clip = model_base.clip_with_lora
final_vae = model_base.vae
- final_unet.model.diffusion_model.in_inpaint = False
-
final_refiner_unet = model_refiner.unet_with_lora
final_refiner_vae = model_refiner.vae
- if final_refiner_unet is not None:
- final_refiner_unet.model.diffusion_model.in_inpaint = False
-
if final_expansion is None:
final_expansion = FooocusExpansion()
@@ -276,32 +288,52 @@ def calculate_sigmas(sampler, model, scheduler, steps, denoise):
@torch.no_grad()
@torch.inference_mode()
-def process_diffusion(positive_cond, negative_cond, steps, switch, width, height, image_seed, callback, sampler_name, scheduler_name, latent=None, denoise=1.0, tiled=False, cfg_scale=7.0, refiner_swap_method='joint'):
- global final_unet, final_refiner_unet, final_vae, final_refiner_vae
+def get_candidate_vae(steps, switch, denoise=1.0, refiner_swap_method='joint'):
+ assert refiner_swap_method in ['joint', 'separate', 'vae']
- assert refiner_swap_method in ['joint', 'separate', 'vae', 'upscale']
-
- refiner_use_different_vae = final_refiner_vae is not None and final_refiner_unet is not None
-
- if refiner_swap_method == 'upscale':
- if not refiner_use_different_vae:
- refiner_swap_method = 'joint'
- else:
- if refiner_use_different_vae:
- if denoise > 0.95:
- refiner_swap_method = 'vae'
+ if final_refiner_vae is not None and final_refiner_unet is not None:
+ if denoise > 0.9:
+ return final_vae, final_refiner_vae
+ else:
+ if denoise > (float(steps - switch) / float(steps)) ** 0.834: # karras 0.834
+ return final_vae, None
else:
- # VAE swap only support full denoise
- # Disable refiner to avoid SD15 in joint/separate swap
- final_refiner_unet = None
- final_refiner_vae = None
+ return final_refiner_vae, None
+
+ return final_vae, final_refiner_vae
+
+
+@torch.no_grad()
+@torch.inference_mode()
+def process_diffusion(positive_cond, negative_cond, steps, switch, width, height, image_seed, callback, sampler_name, scheduler_name, latent=None, denoise=1.0, tiled=False, cfg_scale=7.0, refiner_swap_method='joint'):
+ target_unet, target_vae, target_refiner_unet, target_refiner_vae, target_clip \
+ = final_unet, final_vae, final_refiner_unet, final_refiner_vae, final_clip
+
+ assert refiner_swap_method in ['joint', 'separate', 'vae']
+
+ if final_refiner_vae is not None and final_refiner_unet is not None:
+ # The refiner uses a different VAE, so it must be an SD1.5 refiner.
+ if denoise > 0.9:
+ refiner_swap_method = 'vae'
+ else:
+ refiner_swap_method = 'joint'
+ if denoise > (float(steps - switch) / float(steps)) ** 0.834: # karras 0.834
+ target_unet, target_vae, target_refiner_unet, target_refiner_vae \
+ = final_unet, final_vae, None, None
+ print('[Sampler] Only use the base model because of partial denoise.')
+ else:
+ positive_cond = clip_separate(positive_cond, target_model=final_refiner_unet.model, target_clip=final_clip)
+ negative_cond = clip_separate(negative_cond, target_model=final_refiner_unet.model, target_clip=final_clip)
+ target_unet, target_vae, target_refiner_unet, target_refiner_vae \
+ = final_refiner_unet, final_refiner_vae, None, None
+ print('[Sampler] Only use the refiner because of partial denoise.')
print(f'[Sampler] refiner_swap_method = {refiner_swap_method}')
if latent is None:
- empty_latent = core.generate_empty_latent(width=width, height=height, batch_size=1)
+ initial_latent = core.generate_empty_latent(width=width, height=height, batch_size=1)
else:
- empty_latent = latent
+ initial_latent = latent
minmax_sigmas = calculate_sigmas(sampler=sampler_name, scheduler=scheduler_name, model=final_unet.model, steps=steps, denoise=denoise)
sigma_min, sigma_max = minmax_sigmas[minmax_sigmas > 0].min(), minmax_sigmas.max()
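Note: both `get_candidate_vae` and the rewritten routing in `process_diffusion` hinge on the `karras 0.834` heuristic, which appears to map the fraction of steps after the switch point into a noise fraction: if the partial-denoise window starts before the switch, only the base model (and its VAE) is used; otherwise only the refiner. A sketch under that reading:

```python
def pick_stage(steps: int, switch: int, denoise: float) -> str:
    """Sketch of the partial-denoise routing above (an interpretation, not the API)."""
    refiner_fraction = (steps - switch) / steps
    # 0.834 approximates how a karras schedule maps step fraction to noise level.
    if denoise > refiner_fraction ** 0.834:
        return 'base'     # the denoise window starts before the switch point
    return 'refiner'      # the whole window lies in the refiner's regime

print(pick_stage(steps=30, switch=20, denoise=0.5))   # base
print(pick_stage(steps=30, switch=20, denoise=0.2))   # refiner
```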
@@ -310,18 +342,18 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
print(f'[Sampler] sigma_min = {sigma_min}, sigma_max = {sigma_max}')
modules.patch.BrownianTreeNoiseSamplerPatched.global_init(
- empty_latent['samples'].to(fcbh.model_management.get_torch_device()),
+ initial_latent['samples'].to(fcbh.model_management.get_torch_device()),
sigma_min, sigma_max, seed=image_seed, cpu=False)
decoded_latent = None
if refiner_swap_method == 'joint':
sampled_latent = core.ksampler(
- model=final_unet,
- refiner=final_refiner_unet,
+ model=target_unet,
+ refiner=target_refiner_unet,
positive=positive_cond,
negative=negative_cond,
- latent=empty_latent,
+ latent=initial_latent,
steps=steps, start_step=0, last_step=steps, disable_noise=False, force_full_denoise=True,
seed=image_seed,
denoise=denoise,
@@ -333,32 +365,14 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
previewer_start=0,
previewer_end=steps,
)
- decoded_latent = core.decode_vae(vae=final_vae, latent_image=sampled_latent, tiled=tiled)
-
- if refiner_swap_method == 'upscale':
- sampled_latent = core.ksampler(
- model=final_refiner_unet,
- positive=clip_separate(positive_cond, target_model=final_refiner_unet.model, target_clip=final_clip),
- negative=clip_separate(negative_cond, target_model=final_refiner_unet.model, target_clip=final_clip),
- latent=empty_latent,
- steps=steps, start_step=0, last_step=steps, disable_noise=False, force_full_denoise=True,
- seed=image_seed,
- denoise=denoise,
- callback_function=callback,
- cfg=cfg_scale,
- sampler_name=sampler_name,
- scheduler=scheduler_name,
- previewer_start=0,
- previewer_end=steps,
- )
- decoded_latent = core.decode_vae(vae=final_refiner_vae, latent_image=sampled_latent, tiled=tiled)
+ decoded_latent = core.decode_vae(vae=target_vae, latent_image=sampled_latent, tiled=tiled)
if refiner_swap_method == 'separate':
sampled_latent = core.ksampler(
- model=final_unet,
+ model=target_unet,
positive=positive_cond,
negative=negative_cond,
- latent=empty_latent,
+ latent=initial_latent,
steps=steps, start_step=0, last_step=switch, disable_noise=False, force_full_denoise=False,
seed=image_seed,
denoise=denoise,
@@ -371,15 +385,15 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
)
print('Refiner swapped by changing ksampler. Noise preserved.')
- target_model = final_refiner_unet
+ target_model = target_refiner_unet
if target_model is None:
- target_model = final_unet
+ target_model = target_unet
print('Use base model to refine itself - this may because of developer mode.')
sampled_latent = core.ksampler(
model=target_model,
- positive=clip_separate(positive_cond, target_model=target_model.model, target_clip=final_clip),
- negative=clip_separate(negative_cond, target_model=target_model.model, target_clip=final_clip),
+ positive=clip_separate(positive_cond, target_model=target_model.model, target_clip=target_clip),
+ negative=clip_separate(negative_cond, target_model=target_model.model, target_clip=target_clip),
latent=sampled_latent,
steps=steps, start_step=switch, last_step=steps, disable_noise=True, force_full_denoise=True,
seed=image_seed,
@@ -392,9 +406,9 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
previewer_end=steps,
)
- target_model = final_refiner_vae
+ target_model = target_refiner_vae
if target_model is None:
- target_model = final_vae
+ target_model = target_vae
decoded_latent = core.decode_vae(vae=target_model, latent_image=sampled_latent, tiled=tiled)
if refiner_swap_method == 'vae':
@@ -404,10 +418,10 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
modules.inpaint_worker.current_task.unswap()
sampled_latent = core.ksampler(
- model=final_unet,
+ model=target_unet,
positive=positive_cond,
negative=negative_cond,
- latent=empty_latent,
+ latent=initial_latent,
steps=steps, start_step=0, last_step=switch, disable_noise=False, force_full_denoise=True,
seed=image_seed,
denoise=denoise,
@@ -420,9 +434,9 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
)
print('Fooocus VAE-based swap.')
- target_model = final_refiner_unet
+ target_model = target_refiner_unet
if target_model is None:
- target_model = final_unet
+ target_model = target_unet
print('Use base model to refine itself - this may because of developer mode.')
sampled_latent = vae_parse(sampled_latent)
@@ -442,8 +456,8 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
sampled_latent = core.ksampler(
model=target_model,
- positive=clip_separate(positive_cond, target_model=target_model.model, target_clip=final_clip),
- negative=clip_separate(negative_cond, target_model=target_model.model, target_clip=final_clip),
+ positive=clip_separate(positive_cond, target_model=target_model.model, target_clip=target_clip),
+ negative=clip_separate(negative_cond, target_model=target_model.model, target_clip=target_clip),
latent=sampled_latent,
steps=len_sigmas, start_step=0, last_step=len_sigmas, disable_noise=False, force_full_denoise=True,
seed=image_seed+1,
@@ -458,9 +472,9 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
noise_mean=noise_mean
)
- target_model = final_refiner_vae
+ target_model = target_refiner_vae
if target_model is None:
- target_model = final_vae
+ target_model = target_vae
decoded_latent = core.decode_vae(vae=target_model, latent_image=sampled_latent, tiled=tiled)
images = core.pytorch_to_numpy(decoded_latent)
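Note: after this refactor `process_diffusion` reads models from `target_*` locals instead of mutating the `final_*` globals, and three swap strategies remain. A summary sketch (an interpretation of the branches above, not an API):

```python
# Summary sketch of the three remaining refiner swap strategies:
#   'joint'    - one ksampler call; the refiner is handed to the sampler itself.
#   'separate' - two ksampler calls sharing noise: base for [0, switch),
#                refiner for [switch, steps], conditions re-encoded via clip_separate.
#   'vae'      - base samples to the switch point, the latent crosses models
#                through a VAE round-trip, then the refiner finishes the job.
def describe_swap(method: str) -> str:
    return {
        'joint': 'single sampler pass, refiner passed alongside the base model',
        'separate': 'two sampler passes, noise preserved across the switch',
        'vae': 'latent handed across models through a VAE round-trip',
    }[method]

print(describe_swap('vae'))
```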
diff --git a/modules/flags.py b/modules/flags.py
index e96ab6d..2e1c099 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -32,5 +32,10 @@ default_parameters = {
cn_ip: (0.5, 0.6), cn_ip_face: (0.9, 0.75), cn_canny: (0.5, 1.0), cn_cpds: (0.5, 1.0)
} # stop, weight
-inpaint_engine_versions = ['v1', 'v2.5', 'v2.6']
+inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6']
performance_selections = ['Speed', 'Quality', 'Extreme Speed']
+
+inpaint_option_default = 'Inpaint or Outpaint (default)'
+inpaint_option_detail = 'Improve Detail (face, hand, eyes, etc.)'
+inpaint_option_modify = 'Modify Content (add objects, change background, etc.)'
+inpaint_options = [inpaint_option_default, inpaint_option_detail, inpaint_option_modify]
diff --git a/modules/inpaint_worker.py b/modules/inpaint_worker.py
index 6e81765..9aa9adc 100644
--- a/modules/inpaint_worker.py
+++ b/modules/inpaint_worker.py
@@ -1,12 +1,12 @@
import torch
import numpy as np
-import modules.default_pipeline as pipeline
from PIL import Image, ImageFilter
-from modules.util import resample_image, set_image_shape_ceil
+from modules.util import resample_image, set_image_shape_ceil, get_image_shape_ceil
+from modules.upscaler import perform_upscale
-inpaint_head = None
+inpaint_head_model = None
class InpaintHead(torch.nn.Module):
@@ -77,29 +77,32 @@ def regulate_abcd(x, a, b, c, d):
def compute_initial_abcd(x):
indices = np.where(x)
- a = np.min(indices[0]) - 64
- b = np.max(indices[0]) + 65
- c = np.min(indices[1]) - 64
- d = np.max(indices[1]) + 65
+ a = np.min(indices[0])
+ b = np.max(indices[0])
+ c = np.min(indices[1])
+ d = np.max(indices[1])
abp = (b + a) // 2
abm = (b - a) // 2
cdp = (d + c) // 2
cdm = (d - c) // 2
- l = max(abm, cdm)
+ l = int(max(abm, cdm) * 1.15)
a = abp - l
- b = abp + l
+ b = abp + l + 1
c = cdp - l
- d = cdp + l
+ d = cdp + l + 1
a, b, c, d = regulate_abcd(x, a, b, c, d)
return a, b, c, d
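Note: the bounding-box change replaces the fixed 64-pixel margin with a proportional one: the mask's tight box is squared around its center and padded by 15%, and `solve_abcd` below then grows it until it covers at least a `k` fraction of the image (the new "respective field"). A numeric sketch of the square-box step (clamping via `regulate_abcd` omitted):

```python
import numpy as np

def padded_square_box(mask: np.ndarray):
    """Sketch of the new compute_initial_abcd: tight box, centered square, 15% pad."""
    ys, xs = np.where(mask)
    a, b, c, d = ys.min(), ys.max(), xs.min(), xs.max()
    cy, cx = (a + b) // 2, (c + d) // 2
    half = int(max(b - a, d - c) // 2 * 1.15)   # proportional margin, not fixed 64px
    return cy - half, cy + half + 1, cx - half, cx + half + 1  # may still need clamping

mask = np.zeros((256, 256), dtype=bool)
mask[100:140, 80:180] = True
print(padded_square_box(mask))  # a square window around the masked strip
```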
-def solve_abcd(x, a, b, c, d, outpaint):
+def solve_abcd(x, a, b, c, d, k):
+ k = float(k)
+ assert 0.0 <= k <= 1.0
+
H, W = x.shape[:2]
- if outpaint:
+ if k == 1.0:
return 0, H, 0, W
while True:
- if b - a > H * 0.618 and d - c > W * 0.618:
+ if b - a >= H * k and d - c >= W * k:
break
add_h = (b - a) < (d - c)
@@ -138,21 +141,30 @@ def fooocus_fill(image, mask):
class InpaintWorker:
- def __init__(self, image, mask, is_outpaint):
+ def __init__(self, image, mask, use_fill=True, k=0.618):
a, b, c, d = compute_initial_abcd(mask > 0)
- a, b, c, d = solve_abcd(mask, a, b, c, d, outpaint=is_outpaint)
+ a, b, c, d = solve_abcd(mask, a, b, c, d, k=k)
# interested area
self.interested_area = (a, b, c, d)
self.interested_mask = mask[a:b, c:d]
self.interested_image = image[a:b, c:d]
+ # super resolution
+ if get_image_shape_ceil(self.interested_image) < 1024:
+ self.interested_image = perform_upscale(self.interested_image)
+
# resize to make images ready for diffusion
self.interested_image = set_image_shape_ceil(self.interested_image, 1024)
+ self.interested_fill = self.interested_image.copy()
H, W, C = self.interested_image.shape
+ # process mask
self.interested_mask = up255(resample_image(self.interested_mask, W, H), t=127)
- self.interested_fill = fooocus_fill(self.interested_image, self.interested_mask)
+
+ # compute filling
+ if use_fill:
+ self.interested_fill = fooocus_fill(self.interested_image, self.interested_mask)
# soft pixels
self.mask = morphological_open(mask)
@@ -164,36 +176,40 @@ class InpaintWorker:
self.swapped = False
self.latent_mask = None
self.inpaint_head_feature = None
+ self.processing_sampler_in = True
+ self.processing_sampler_out = True
return
- def load_latent(self,
- latent_fill,
- latent_inpaint,
- latent_mask,
- latent_swap=None,
- inpaint_head_model_path=None):
-
- global inpaint_head
- assert inpaint_head_model_path is not None
-
+ def load_latent(self, latent_fill, latent_mask, latent_swap=None):
self.latent = latent_fill
self.latent_mask = latent_mask
self.latent_after_swap = latent_swap
+ return
- if inpaint_head is None:
- inpaint_head = InpaintHead()
+ def patch(self, inpaint_head_model_path, inpaint_latent, inpaint_latent_mask, model):
+ global inpaint_head_model
+
+ if inpaint_head_model is None:
+ inpaint_head_model = InpaintHead()
sd = torch.load(inpaint_head_model_path, map_location='cpu')
- inpaint_head.load_state_dict(sd)
+ inpaint_head_model.load_state_dict(sd)
feed = torch.cat([
- latent_mask,
- pipeline.final_unet.model.process_latent_in(latent_inpaint)
+ inpaint_latent_mask,
+ model.model.process_latent_in(inpaint_latent)
], dim=1)
- inpaint_head.to(device=feed.device, dtype=feed.dtype)
- self.inpaint_head_feature = inpaint_head(feed)
+ inpaint_head_model.to(device=feed.device, dtype=feed.dtype)
+ inpaint_head_feature = inpaint_head_model(feed)
- return
+ def input_block_patch(h, transformer_options):
+ if transformer_options["block"][1] == 0:
+ h = h + inpaint_head_feature.to(h)
+ return h
+
+ m = model.clone()
+ m.set_model_input_block_patch(input_block_patch)
+ return m
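Note: instead of flagging `in_inpaint` on the UNet (removed in `modules/patch.py` below), the inpaint head feature is now injected through the standard `input_block_patch` hook on a cloned model, so the patch cannot leak into unrelated generations. A sketch of the hook contract, assuming fcbh's ModelPatcher API:

```python
# Sketch of the hook contract (assuming fcbh's ModelPatcher API): the callback
# receives each input block's hidden states plus transformer_options, and the
# injection targets block ("input", 0) only.
def make_input_block_patch(inpaint_head_feature):
    def input_block_patch(h, transformer_options):
        if transformer_options["block"][1] == 0:
            h = h + inpaint_head_feature.to(h)  # add the head feature once, at block 0
        return h
    return input_block_patch

# patched = model.clone()
# patched.set_model_input_block_patch(make_input_block_patch(feature))
```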
def swap(self):
if self.swapped:
@@ -239,5 +255,5 @@ class InpaintWorker:
return result
def visualize_mask_processing(self):
- return [self.interested_fill, self.interested_mask, self.image, self.mask]
+ return [self.interested_fill, self.interested_mask, self.interested_image]
diff --git a/modules/patch.py b/modules/patch.py
index b32da79..6ccaf0f 100644
--- a/modules/patch.py
+++ b/modules/patch.py
@@ -304,16 +304,19 @@ def encode_token_weights_patched_with_a1111_method(self, token_weight_pairs):
def patched_KSamplerX0Inpaint_forward(self, x, sigma, uncond, cond, cond_scale, denoise_mask, model_options={}, seed=None):
if inpaint_worker.current_task is not None:
+ latent_processor = self.inner_model.inner_model.process_latent_in
+ inpaint_latent = latent_processor(inpaint_worker.current_task.latent).to(x)
+ inpaint_mask = inpaint_worker.current_task.latent_mask.to(x)
+
if getattr(self, 'energy_generator', None) is None:
# avoid bad results by using different seeds.
self.energy_generator = torch.Generator(device='cpu').manual_seed((seed + 1) % constants.MAX_SEED)
- latent_processor = self.inner_model.inner_model.process_latent_in
- inpaint_latent = latent_processor(inpaint_worker.current_task.latent).to(x)
- inpaint_mask = inpaint_worker.current_task.latent_mask.to(x)
- energy_sigma = sigma.reshape([sigma.shape[0]] + [1] * (len(x.shape) - 1))
- current_energy = torch.randn(x.size(), dtype=x.dtype, generator=self.energy_generator, device="cpu").to(x) * energy_sigma
- x = x * inpaint_mask + (inpaint_latent + current_energy) * (1.0 - inpaint_mask)
+ if inpaint_worker.current_task.processing_sampler_in:
+ energy_sigma = sigma.reshape([sigma.shape[0]] + [1] * (len(x.shape) - 1))
+ current_energy = torch.randn(
+ x.size(), dtype=x.dtype, generator=self.energy_generator, device="cpu").to(x) * energy_sigma
+ x = x * inpaint_mask + (inpaint_latent + current_energy) * (1.0 - inpaint_mask)
out = self.inner_model(x, sigma,
cond=cond,
@@ -322,7 +325,8 @@ def patched_KSamplerX0Inpaint_forward(self, x, sigma, uncond, cond, cond_scale,
model_options=model_options,
seed=seed)
- out = out * inpaint_mask + inpaint_latent * (1.0 - inpaint_mask)
+ if inpaint_worker.current_task.processing_sampler_out:
+ out = out * inpaint_mask + inpaint_latent * (1.0 - inpaint_mask)
else:
out = self.inner_model(x, sigma,
cond=cond,
@@ -403,10 +407,6 @@ def patched_unet_forward(self, x, timesteps=None, context=None, y=None, control=
self.current_step = 1.0 - timesteps.to(x) / 999.0
global_diffusion_progress = float(self.current_step.detach().cpu().numpy().tolist()[0])
- inpaint_fix = None
- if getattr(self, 'in_inpaint', False) and inpaint_worker.current_task is not None:
- inpaint_fix = inpaint_worker.current_task.inpaint_head_feature
-
transformer_options["original_shape"] = list(x.shape)
transformer_options["current_index"] = 0
transformer_patches = transformer_options.get("patches", {})
@@ -426,12 +426,6 @@ def patched_unet_forward(self, x, timesteps=None, context=None, y=None, control=
for id, module in enumerate(self.input_blocks):
transformer_options["block"] = ("input", id)
h = forward_timestep_embed(module, h, emb, context, transformer_options)
-
- if inpaint_fix is not None:
- if int(h.shape[1]) == int(inpaint_fix.shape[1]):
- h = h + inpaint_fix.to(h)
- inpaint_fix = None
-
h = apply_control(h, control, 'input')
if "input_block_patch" in transformer_patches:
patch = transformer_patches["input_block_patch"]
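Note: the new flags split the classic latent-inpaint blend into its two halves: `processing_sampler_in` re-imposes the known region on the sampler input (re-noised at the current sigma), and `processing_sampler_out` re-imposes it on the output. The input-side blend as a standalone sketch:

```python
import torch

def blend_known_region(x, inpaint_latent, inpaint_mask, sigma, generator):
    """Sketch of the sampler-input blend: keep x where mask=1, and where mask=0
    replace it with the known latent plus fresh noise at the current sigma."""
    energy_sigma = sigma.reshape([sigma.shape[0]] + [1] * (x.ndim - 1))
    noise = torch.randn(x.size(), dtype=x.dtype, generator=generator, device='cpu').to(x)
    return x * inpaint_mask + (inpaint_latent + noise * energy_sigma) * (1.0 - inpaint_mask)
```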
diff --git a/modules/upscaler.py b/modules/upscaler.py
index 8e3a75e..c8dc29a 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -1,5 +1,6 @@
import os
import torch
+import modules.core as core
from fcbh_extras.chainner_models.architecture.RRDB import RRDBNet as ESRGAN
from fcbh_extras.nodes_upscale_model import ImageUpscaleWithModel
@@ -13,6 +14,9 @@ model = None
def perform_upscale(img):
global model
+
+ print(f'Upscaling image with shape {str(img.shape)} ...')
+
if model is None:
sd = torch.load(model_filename)
sdo = OrderedDict()
@@ -22,4 +26,9 @@ def perform_upscale(img):
model = ESRGAN(sdo)
model.cpu()
model.eval()
- return opImageUpscaleWithModel.upscale(model, img)[0]
+
+ img = core.numpy_to_pytorch(img)
+ img = opImageUpscaleWithModel.upscale(model, img)[0]
+ img = core.pytorch_to_numpy(img)[0]
+
+ return img
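Note: `perform_upscale` now owns the numpy-to-torch round-trip, which is why `async_worker.py` dropped its conversions and why `inpaint_worker` can call it directly on HWC uint8 arrays. A hypothetical call:

```python
import numpy as np
# from modules.upscaler import perform_upscale

img = (np.random.rand(512, 512, 3) * 255).astype(np.uint8)  # HWC uint8 in, HWC uint8 out
# upscaled = perform_upscale(img)   # ESRGAN upscale; shape grows by the model's factor
# print(upscaled.shape)             # e.g. (2048, 2048, 3) for a 4x RRDB model
```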
diff --git a/modules/util.py b/modules/util.py
index 1601f1f..fce7efd 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -79,7 +79,7 @@ def get_shape_ceil(h, w):
def get_image_shape_ceil(im):
- H, W, _ = im.shape
+ H, W = im.shape[:2]
return get_shape_ceil(H, W)
diff --git a/presets/sai.json b/presets/sai.json
index fe67c03..ac9c17d 100644
--- a/presets/sai.json
+++ b/presets/sai.json
@@ -1,7 +1,7 @@
{
"default_model": "sd_xl_base_1.0_0.9vae.safetensors",
"default_refiner": "sd_xl_refiner_1.0_0.9vae.safetensors",
- "default_refiner_switch": 0.7,
+ "default_refiner_switch": 0.75,
"default_loras": [
[
"sd_xl_offset_example-lora_1.0.safetensors",
diff --git a/update_log.md b/update_log.md
index 5e78371..d293b48 100644
--- a/update_log.md
+++ b/update_log.md
@@ -1,3 +1,7 @@
+# 2.1.822
+
+* New inpaint system (inpaint beta test ends).
+
# 2.1.821
* New UI for LoRAs.
diff --git a/webui.py b/webui.py
index d1381d5..479ac4a 100644
--- a/webui.py
+++ b/webui.py
@@ -22,6 +22,11 @@ from modules.auth import auth_enabled, check_auth
def generate_clicked(*args):
+ import fcbh.model_management as model_management
+
+ with model_management.interrupt_processing_mutex:
+ model_management.interrupt_processing = False
+
# outputs=[progress_html, progress_window, progress_gallery, gallery]
execution_start_time = time.perf_counter()
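Note: clearing `interrupt_processing` under its mutex before each run prevents a stale stop request from a previous generation from instantly cancelling the next one. The pattern, with the fcbh names assumed from the hunk above:

```python
import threading

# Reduced sketch of the reset-under-lock pattern (names assumed from fcbh):
interrupt_processing_mutex = threading.RLock()
interrupt_processing = False

def begin_generation():
    global interrupt_processing
    with interrupt_processing_mutex:
        interrupt_processing = False  # a stale stop request must not kill this run
```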
@@ -178,11 +183,15 @@ with shared.gradio_root:
outputs=ip_ad_cols + ip_types + ip_stops + ip_weights,
queue=False, show_progress=False)
- with gr.TabItem(label='Inpaint or Outpaint (beta)') as inpaint_tab:
+ with gr.TabItem(label='Inpaint or Outpaint') as inpaint_tab:
inpaint_input_image = grh.Image(label='Drag above image to here', source='upload', type='numpy', tool='sketch', height=500, brush_color="#FFFFFF", elem_id='inpaint_canvas')
- gr.HTML('Outpaint Expansion Direction:')
- outpaint_selections = gr.CheckboxGroup(choices=['Left', 'Right', 'Top', 'Bottom'], value=[], label='Outpaint', show_label=False, container=False)
- gr.HTML('* Powered by Fooocus Inpaint Engine (beta) \U0001F4D4 Document')
+ with gr.Row():
+ inpaint_additional_prompt = gr.Textbox(placeholder="Describe what you want to inpaint.", elem_id='inpaint_additional_prompt', label='Inpaint Additional Prompt', visible=False)
+ outpaint_selections = gr.CheckboxGroup(choices=['Left', 'Right', 'Top', 'Bottom'], value=[], label='Outpaint Direction')
+ inpaint_mode = gr.Dropdown(choices=modules.flags.inpaint_options, value=modules.flags.inpaint_option_default, label='Method')
+ example_inpaint_prompts = gr.Dataset(samples=modules.config.example_inpaint_prompts, label='Additional Prompt Quick List', components=[inpaint_additional_prompt], visible=False)
+ gr.HTML('* Powered by Fooocus Inpaint Engine \U0001F4D4 Document')
+ example_inpaint_prompts.click(lambda x: x[0], inputs=example_inpaint_prompts, outputs=inpaint_additional_prompt, show_progress=False, queue=False)
switch_js = "(x) => {if(x){viewer_to_bottom(100);viewer_to_bottom(500);}else{viewer_to_top();} return x;}"
down_js = "() => {viewer_to_bottom();}"
@@ -297,15 +306,17 @@ with shared.gradio_root:
with gr.Row():
model_refresh = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button')
with gr.Tab(label='Advanced'):
- sharpness = gr.Slider(label='Sampling Sharpness', minimum=0.0, maximum=30.0, step=0.001, value=modules.config.default_sample_sharpness,
+ guidance_scale = gr.Slider(label='Guidance Scale', minimum=1.0, maximum=30.0, step=0.01,
+ value=modules.config.default_cfg_scale,
+ info='Higher value means the style is cleaner, more vivid, and more artistic.')
+ sharpness = gr.Slider(label='Image Sharpness', minimum=0.0, maximum=30.0, step=0.001,
+ value=modules.config.default_sample_sharpness,
info='Higher value means image and texture are sharper.')
- guidance_scale = gr.Slider(label='Guidance Scale', minimum=1.0, maximum=30.0, step=0.01, value=modules.config.default_cfg_scale,
- info='Higher value means style is cleaner, vivider, and more artistic.')
gr.HTML('\U0001F4D4 Document')
dev_mode = gr.Checkbox(label='Developer Debug Mode', value=False, container=False)
with gr.Column(visible=False) as dev_tools:
- with gr.Tab(label='Developer Debug Tools'):
+ with gr.Tab(label='Debug Tools'):
adm_scaler_positive = gr.Slider(label='Positive ADM Guidance Scaler', minimum=0.1, maximum=3.0,
step=0.001, value=1.5, info='The scaler multiplied to positive ADM (use 1.0 to disable). ')
adm_scaler_negative = gr.Slider(label='Negative ADM Guidance Scaler', minimum=0.1, maximum=3.0,
@@ -352,14 +363,10 @@ with shared.gradio_root:
overwrite_upscale_strength = gr.Slider(label='Forced Overwrite of Denoising Strength of "Upscale"',
minimum=-1, maximum=1.0, step=0.001, value=-1,
info='Set as negative number to disable. For developer debugging.')
- inpaint_engine = gr.Dropdown(label='Inpaint Engine',
- value=modules.config.default_inpaint_engine_version,
- choices=flags.inpaint_engine_versions,
- info='Version of Fooocus inpaint model')
disable_preview = gr.Checkbox(label='Disable Preview', value=False,
info='Disable preview during generation.')
- with gr.Tab(label='Control Debug'):
+ with gr.Tab(label='Control'):
debugging_cn_preprocessor = gr.Checkbox(label='Debug Preprocessors', value=False,
info='See the results from preprocessors.')
skipping_cn_preprocessor = gr.Checkbox(label='Skip Preprocessors', value=False,
@@ -380,6 +387,27 @@ with shared.gradio_root:
canny_high_threshold = gr.Slider(label='Canny High Threshold', minimum=1, maximum=255,
step=1, value=128)
+ with gr.Tab(label='Inpaint'):
+ debugging_inpaint_preprocessor = gr.Checkbox(label='Debug Inpaint Preprocessing', value=False)
+ inpaint_disable_initial_latent = gr.Checkbox(label='Disable initial latent in inpaint', value=False)
+ inpaint_engine = gr.Dropdown(label='Inpaint Engine',
+ value=modules.config.default_inpaint_engine_version,
+ choices=flags.inpaint_engine_versions,
+ info='Version of Fooocus inpaint model')
+ inpaint_strength = gr.Slider(label='Inpaint Denoising Strength',
+ minimum=0.0, maximum=1.0, step=0.001, value=1.0,
+ info='Same as the denoising strength in A1111 inpaint. '
+ 'Only used in inpaint, not used in outpaint. '
+ '(Outpaint always uses 1.0)')
+ inpaint_respective_field = gr.Slider(label='Inpaint Respective Field',
+ minimum=0.0, maximum=1.0, step=0.001, value=0.618,
+ info='The area to inpaint. '
+ 'Value 0 is the same as "Only Masked" in A1111. '
+ 'Value 1 is the same as "Whole Image" in A1111. '
+ 'Only used in inpaint, not used in outpaint. '
+ '(Outpaint always uses 1.0)')
+ inpaint_ctrls = [debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field]
+
with gr.Tab(label='FreeU'):
freeu_enabled = gr.Checkbox(label='Enabled', value=False)
freeu_b1 = gr.Slider(label='B1', minimum=0, maximum=2, step=0.01, value=1.01)
@@ -392,9 +420,10 @@ with shared.gradio_root:
scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height,
overwrite_vary_strength, overwrite_upscale_strength,
mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint,
- debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold,
- inpaint_engine, refiner_swap_method]
+ debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness,
+ canny_low_threshold, canny_high_threshold, refiner_swap_method]
adps += freeu_ctrls
+ adps += inpaint_ctrls
def dev_mode_checked(r):
return gr.update(visible=r)
@@ -426,6 +455,39 @@ with shared.gradio_root:
queue=False, show_progress=False) \
.then(fn=lambda: None, _js='refresh_grid_delayed', queue=False, show_progress=False)
+ def inpaint_mode_change(mode):
+ assert mode in modules.flags.inpaint_options
+
+ # inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts,
+ # inpaint_disable_initial_latent, inpaint_engine,
+ # inpaint_strength, inpaint_respective_field
+
+ if mode == modules.flags.inpaint_option_detail:
+ return [
+ gr.update(visible=True), gr.update(visible=False, value=[]),
+ gr.Dataset.update(visible=True, samples=modules.config.example_inpaint_prompts),
+ False, 'None', 0.5, 0.0
+ ]
+
+ if mode == modules.flags.inpaint_option_modify:
+ return [
+ gr.update(visible=True), gr.update(visible=False, value=[]),
+ gr.Dataset.update(visible=False, samples=modules.config.example_inpaint_prompts),
+ True, modules.config.default_inpaint_engine_version, 1.0, 0.0
+ ]
+
+ return [
+ gr.update(visible=False, value=''), gr.update(visible=True),
+ gr.Dataset.update(visible=False, samples=modules.config.example_inpaint_prompts),
+ False, modules.config.default_inpaint_engine_version, 1.0, 0.618
+ ]
+
+ inpaint_mode.input(inpaint_mode_change, inputs=inpaint_mode, outputs=[
+ inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts,
+ inpaint_disable_initial_latent, inpaint_engine,
+ inpaint_strength, inpaint_respective_field
+ ], show_progress=False, queue=False)
+
ctrls = [
prompt, negative_prompt, style_selections,
performance_selection, aspect_ratios_selection, image_number, image_seed, sharpness, guidance_scale
@@ -434,7 +496,7 @@ with shared.gradio_root:
ctrls += [base_model, refiner_model, refiner_switch] + lora_ctrls
ctrls += [input_image_checkbox, current_tab]
ctrls += [uov_method, uov_input_image]
- ctrls += [outpaint_selections, inpaint_input_image]
+ ctrls += [outpaint_selections, inpaint_input_image, inpaint_additional_prompt]
ctrls += ip_ctrls
generate_button.click(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=True, interactive=True), gr.update(visible=False), []), outputs=[stop_button, skip_button, generate_button, gallery]) \