diff --git a/.gitignore b/.gitignore
index baeabd5..f29cafc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,6 +12,7 @@ lena_result.png
 lena_test.py
 user_path_config.txt
 build_chb.py
+experiment.py
 /modules/*.png
 /repositories
 /venv
diff --git a/fooocus_version.py b/fooocus_version.py
index d9aee23..52243a3 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '2.1.712'
+version = '2.1.714'
diff --git a/modules/async_worker.py b/modules/async_worker.py
index 1cbb4db..d73de24 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -29,7 +29,8 @@ def worker():
     from modules.sdxl_styles import apply_style, apply_wildcards, aspect_ratios, fooocus_expansion
     from modules.private_logger import log
     from modules.expansion import safe_str
-    from modules.util import join_prompts, remove_empty_str, HWC3, resize_image, image_is_generated_in_current_ui, make_sure_that_image_is_not_too_large
+    from modules.util import join_prompts, remove_empty_str, HWC3, resize_image, \
+        get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil
     from modules.upscaler import perform_upscale
 
     try:
@@ -287,11 +288,6 @@ def worker():
             progressbar(13, 'Image processing ...')
 
         if 'vary' in goals:
-            if not image_is_generated_in_current_ui(uov_input_image, ui_width=width, ui_height=height):
-                uov_input_image = resize_image(uov_input_image, width=width, height=height)
-                print(f'Resolution corrected - users are uploading their own images.')
-            else:
-                print(f'Processing images generated by Fooocus.')
             if 'subtle' in uov_method:
                 denoising_strength = 0.5
             if 'strong' in uov_method:
@@ -299,7 +295,16 @@ def worker():
             if advanced_parameters.overwrite_vary_strength > 0:
                 denoising_strength = advanced_parameters.overwrite_vary_strength
 
-            uov_input_image = make_sure_that_image_is_not_too_large(uov_input_image)
+            shape_ceil = get_image_shape_ceil(uov_input_image)
+            if shape_ceil < 1024:
+                print(f'[Vary] Image is resized because it is too small.')
+                shape_ceil = 1024
+            elif shape_ceil > 2048:
+                print(f'[Vary] Image is resized because it is too big.')
+                shape_ceil = 2048
+
+            uov_input_image = set_image_shape_ceil(uov_input_image, shape_ceil)
+
             initial_pixels = core.numpy_to_pytorch(uov_input_image)
             progressbar(13, 'VAE encoding ...')
             initial_latent = core.encode_vae(vae=pipeline.final_vae, pixels=initial_pixels)
@@ -324,18 +329,12 @@ def worker():
             else:
                 f = 1.0
 
-            width_f = int(width * f)
-            height_f = int(height * f)
-
-            if image_is_generated_in_current_ui(uov_input_image, ui_width=width_f, ui_height=height_f):
-                uov_input_image = resize_image(uov_input_image, width=int(W * f), height=int(H * f))
-                print(f'Processing images generated by Fooocus.')
-            else:
-                uov_input_image = resize_image(uov_input_image, width=width_f, height=height_f)
-                print(f'Resolution corrected - users are uploading their own images.')
-
-            H, W, C = uov_input_image.shape
-            image_is_super_large = H * W > 2800 * 2800
+            shape_ceil = get_shape_ceil(H * f, W * f)
+            if shape_ceil < 1024:
+                print(f'[Upscale] Image is resized because it is too small.')
+                shape_ceil = 1024
+            uov_input_image = set_image_shape_ceil(uov_input_image, shape_ceil)
+            image_is_super_large = shape_ceil > 2800
 
             if 'fast' in uov_method:
                 direct_return = True
diff --git a/modules/inpaint_worker.py b/modules/inpaint_worker.py
index 9cc6ab1..fab5f30 100644
--- a/modules/inpaint_worker.py
+++ b/modules/inpaint_worker.py
@@ -3,7 +3,7 @@ import numpy as np
 import modules.default_pipeline as pipeline
 
 from PIL import Image, ImageFilter
-from modules.util import resample_image
+from modules.util import resample_image, set_image_shape_ceil
 
 
 inpaint_head = None
@@ -148,12 +148,10 @@ class InpaintWorker:
         self.interested_image = image[a:b, c:d]
 
         # resize to make images ready for diffusion
+        self.interested_image = set_image_shape_ceil(self.interested_image, 1024)
         H, W, C = self.interested_image.shape
-        k = ((1024.0 ** 2.0) / float(H * W)) ** 0.5
-        H = int(np.ceil(float(H) * k / 16.0)) * 16
-        W = int(np.ceil(float(W) * k / 16.0)) * 16
+
         self.interested_mask = up255(resample_image(self.interested_mask, W, H), t=127)
-        self.interested_image = resample_image(self.interested_image, W, H)
         self.interested_fill = fooocus_fill(self.interested_image, self.interested_mask)
 
         # soft pixels
diff --git a/modules/util.py b/modules/util.py
index 9f2db5b..2bd46fd 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -1,30 +1,12 @@
 import numpy as np
 import datetime
 import random
+import math
 import os
 
 from PIL import Image
 
 
-def image_is_generated_in_current_ui(image, ui_width, ui_height):
-    H, W, C = image.shape
-
-    if H < ui_height:
-        return False
-
-    if W < ui_width:
-        return False
-
-    # k1 = float(H) / float(W)
-    # k2 = float(ui_height) / float(ui_width)
-    # d = abs(k1 - k2)
-    #
-    # if d > 0.01:
-    #     return False
-
-    return True
-
-
 LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
 
 
@@ -92,16 +74,22 @@ def resize_image(im, width, height, resize_mode=1):
     return np.array(res)
 
 
-def make_sure_that_image_is_not_too_large(x):
-    H, W, C = x.shape
-    k = float(2048 * 2048) / float(H * W)
-    k = k ** 0.5
-    if k < 1:
-        H_new = int(H * k)
-        W_new = int(W * k)
-        print(f'Image is too large - resizing from ({H}, {W}) to ({H_new}, {W_new}).')
-        x = resize_image(x, width=W_new, height=H_new, resize_mode=0)
-    return x
+def get_shape_ceil(h, w):
+    return math.ceil(((h * w) ** 0.5) / 64.0) * 64.0
+
+
+def get_image_shape_ceil(im):
+    H, W, _ = im.shape
+    return get_shape_ceil(H, W)
+
+
+def set_image_shape_ceil(im, shape_ceil):
+    H, W, _ = im.shape
+    shape_ceil_before = get_shape_ceil(H, W)
+    k = float(shape_ceil) / shape_ceil_before
+    H = int(round(float(H) * k / 64.0) * 64)
+    W = int(round(float(W) * k / 64.0) * 64)
+    return resample_image(im, width=W, height=H)
 
 
 def HWC3(x):
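
Note (not part of the patch): a minimal standalone sketch of what the new shape-ceil helpers in modules/util.py compute. The function bodies mirror the added code above; the sample 1920x1080 input, the clamp to [1024, 2048] copied from the vary branch in async_worker.py, and the hypothetical target_shape_for_ceil helper are illustrative only, and the real set_image_shape_ceil resamples the image via resample_image rather than just returning target dimensions.

import math

def get_shape_ceil(h, w):
    # round sqrt(h * w) up to the next multiple of 64
    return math.ceil(((h * w) ** 0.5) / 64.0) * 64.0

def target_shape_for_ceil(h, w, shape_ceil):
    # hypothetical helper: the dimension math used inside set_image_shape_ceil,
    # without the actual image resampling step
    k = float(shape_ceil) / get_shape_ceil(h, w)
    return int(round(h * k / 64.0) * 64), int(round(w * k / 64.0) * 64)

if __name__ == '__main__':
    h, w = 1080, 1920                          # e.g. a user-uploaded 1080p image
    ceil = get_shape_ceil(h, w)                # sqrt(1920*1080) = 1440 -> 1472.0
    ceil = min(max(ceil, 1024), 2048)          # the [Vary] clamp from async_worker.py
    print(target_shape_for_ceil(h, w, ceil))   # (1088, 1920), both multiples of 64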