improve resolution handling (#396)
* improve resolution handling
parent 6adc771888
commit ceee6dfd73
@@ -1 +1 @@
-version = '2.0.51'
+version = '2.0.52'
@@ -23,7 +23,7 @@ def worker():
     from modules.sdxl_styles import apply_style, aspect_ratios, fooocus_expansion
     from modules.private_logger import log
     from modules.expansion import safe_str
-    from modules.util import join_prompts, remove_empty_str, HWC3, resize_image
+    from modules.util import join_prompts, remove_empty_str, HWC3, resize_image, image_is_generated_in_current_ui
     from modules.upscaler import perform_upscale

     try:
@@ -82,11 +82,12 @@ def worker():
         progressbar(0, 'Image processing ...')
         if uov_method != flags.disabled and uov_input_image is not None:
             uov_input_image = HWC3(uov_input_image)
-            H, W, C = uov_input_image.shape
             if 'vary' in uov_method:
-                if H * W + 8 < width * height or float(abs(H * width - W * height)) > 1.5 * float(max(H, W, width, height)):
+                if not image_is_generated_in_current_ui(uov_input_image, ui_width=width, ui_height=height):
                     uov_input_image = resize_image(uov_input_image, width=width, height=height)
-                    print(f'Aspect ratio corrected - users are uploading their own images.')
+                    print(f'Resolution corrected - users are uploading their own images.')
+                else:
+                    print(f'Processing images generated by Fooocus.')
                 if 'subtle' in uov_method:
                     denoising_strength = 0.5
                 if 'strong' in uov_method:
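For context, the vary path used to guess whether an input came from the UI with the size/aspect heuristic removed above; the new code delegates that decision to image_is_generated_in_current_ui (added in modules/util.py further down). A small standalone sketch comparing the two conditions on bare shapes; the 1152x896 UI resolution and the sample shapes are illustrative only:

# Illustrative comparison only; both conditions are restated on plain integers
# so the snippet runs without the Fooocus modules.

def old_needs_resize(H, W, width, height):
    # The condition removed by this commit.
    return H * W + 8 < width * height or \
        float(abs(H * width - W * height)) > 1.5 * float(max(H, W, width, height))

def new_needs_resize(H, W, ui_width, ui_height):
    # Negation of image_is_generated_in_current_ui, restated on shapes.
    if H < ui_height or W < ui_width:
        return True
    return abs(float(H) / float(W) - float(ui_height) / float(ui_width)) > 0.01

ui_width, ui_height = 1152, 896  # example UI setting (width, height)
for H, W in [(896, 1152), (1792, 2304), (720, 1280)]:
    print((H, W), old_needs_resize(H, W, ui_width, ui_height), new_needs_resize(H, W, ui_width, ui_height))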
@@ -99,6 +100,14 @@ def worker():
                 height = H * 8
                 print(f'Final resolution is {str((height, width))}.')
             elif 'upscale' in uov_method:
+                H, W, C = uov_input_image.shape
+                progressbar(0, f'Upscaling image from {str((H, W))} ...')
+
+                uov_input_image = core.numpy_to_pytorch(uov_input_image)
+                uov_input_image = perform_upscale(uov_input_image)
+                uov_input_image = core.pytorch_to_numpy(uov_input_image)[0]
+                print(f'Image upscaled.')
+
                 if '1.5x' in uov_method:
                     f = 1.5
                 elif '2x' in uov_method:
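The lines added above convert the numpy image to a tensor, run perform_upscale, and convert back before any resizing. A rough sketch of the conversion convention this relies on (an HxWxC uint8 image mapped to a batched float tensor in [0, 1]); the real core.numpy_to_pytorch / core.pytorch_to_numpy may differ in detail:

import numpy as np
import torch

def numpy_to_pytorch(x: np.ndarray) -> torch.Tensor:
    # HxWxC uint8 image -> 1xHxWxC float32 tensor in [0, 1] (assumed convention).
    return torch.from_numpy(x.astype(np.float32) / 255.0).unsqueeze(0)

def pytorch_to_numpy(x: torch.Tensor) -> list:
    # Batched float tensor -> list of HxWxC uint8 images, hence the [0] above.
    return [np.clip(255.0 * y.cpu().numpy(), 0, 255).astype(np.uint8) for y in x]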
@@ -106,25 +115,31 @@ def worker():
                 else:
                     f = 1.0

-                width = int(W * f)
-                height = int(H * f)
-                image_is_super_large = width * height > 2800 * 2800
-                progressbar(0, f'Upscaling image from {str((H, W))} to {str((height, width))}...')
+                width_f = int(width * f)
+                height_f = int(height * f)

-                uov_input_image = core.numpy_to_pytorch(uov_input_image)
-                uov_input_image = perform_upscale(uov_input_image)
-                uov_input_image = core.pytorch_to_numpy(uov_input_image)[0]
-                uov_input_image = resize_image(uov_input_image, width=width, height=height)
-                print(f'Image upscaled.')
+                if image_is_generated_in_current_ui(uov_input_image, ui_width=width_f, ui_height=height_f):
+                    uov_input_image = resize_image(uov_input_image, width=int(W * f), height=int(H * f))
+                    print(f'Processing images generated by Fooocus.')
+                else:
+                    uov_input_image = resize_image(uov_input_image, width=width_f, height=height_f)
+                    print(f'Resolution corrected - users are uploading their own images.')

-                if 'fast' in uov_method or image_is_super_large:
-                    if 'fast' not in uov_method:
+                H, W, C = uov_input_image.shape
+                image_is_super_large = H * W > 2800 * 2800
+
+                if 'fast' in uov_method:
+                    direct_return = True
+                elif image_is_super_large:
                     print('Image is too large. Directly returned the SR image. '
                           'Usually directly return SR image at 4K resolution '
                           'yields better results than SDXL diffusion.')
-                    d = [
-                        ('Upscale (Fast)', '2x'),
-                    ]
+                    direct_return = True
+                else:
+                    direct_return = False
+
+                if direct_return:
+                    d = [('Upscale (Fast)', '2x')]
                     log(uov_input_image, d, single_line_number=1)
                     outputs.append(['results', [uov_input_image]])
                     return
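Condensing the rewritten branch: the scale factor comes from the method string, the target size depends on whether the already-upscaled image matches the scaled UI resolution, and diffusion is skipped for 'fast' requests or very large results. A dependency-free sketch of that decision logic; upscale_plan and the example method string are hypothetical, and the fixed upscale pre-pass is glossed over:

def upscale_plan(uov_method, W, H, ui_width, ui_height, generated_in_ui):
    # Hypothetical condensed version of the branch above, for illustration.
    if '1.5x' in uov_method:
        f = 1.5
    elif '2x' in uov_method:
        f = 2.0
    else:
        f = 1.0

    if generated_in_ui:
        # Fooocus-generated inputs keep their own proportions.
        width, height = int(W * f), int(H * f)
    else:
        # Uploads are snapped to the scaled UI resolution.
        width, height = int(ui_width * f), int(ui_height * f)

    image_is_super_large = width * height > 2800 * 2800
    direct_return = 'fast' in uov_method or image_is_super_large
    return f, (width, height), direct_return

print(upscale_plan('Upscale (2x)', 1152, 896, 1152, 896, generated_in_ui=True))
# (2.0, (2304, 1792), False)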
@@ -135,6 +150,7 @@ def worker():
                 switch = int(steps * 0.67)
                 initial_pixels = core.numpy_to_pytorch(uov_input_image)
                 progressbar(0, 'VAE encoding ...')
+
                 initial_latent = core.encode_vae(vae=pipeline.xl_base_patched.vae, pixels=initial_pixels, tiled=True)
                 B, C, H, W = initial_latent['samples'].shape
                 width = W * 8
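The width and height recovered from the latent shape rely on the SDXL VAE reducing each spatial dimension by a factor of 8, which is why W * 8 and H * 8 give back the pixel resolution; a quick sanity check with example numbers:

# Example: a 1152x896 image gives a latent with spatial size 112x144,
# so multiplying by 8 recovers the pixel dimensions.
B, C, H, W = 1, 4, 896 // 8, 1152 // 8  # latent shape as in initial_latent['samples']
width, height = W * 8, H * 8
assert (width, height) == (1152, 896)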
@@ -6,6 +6,25 @@ import os
 from PIL import Image


+def image_is_generated_in_current_ui(image, ui_width, ui_height):
+    H, W, C = image.shape
+
+    if H < ui_height:
+        return False
+
+    if W < ui_width:
+        return False
+
+    k1 = float(H) / float(W)
+    k2 = float(ui_height) / float(ui_width)
+    d = abs(k1 - k2)
+
+    if d > 0.01:
+        return False
+
+    return True
+
+
 LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)

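A quick way to exercise the new helper outside the worker; this assumes it is run from the Fooocus repo root so modules.util imports, and the 1152x896 UI resolution is only an example:

import numpy as np

from modules.util import image_is_generated_in_current_ui  # assumes repo root on sys.path

ui_width, ui_height = 1152, 896
generated = np.zeros((896, 1152, 3), dtype=np.uint8)  # same size and aspect as the UI setting
upload = np.zeros((720, 1280, 3), dtype=np.uint8)     # smaller than the UI setting

print(image_is_generated_in_current_ui(generated, ui_width=ui_width, ui_height=ui_height))  # True
print(image_is_generated_in_current_ui(upload, ui_width=ui_width, ui_height=ui_height))     # False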