Fooocus/modules/util.py
Manuel Schmid ba9eadbcda
feat: add metadata to images (#1940)
* feat: add metadata logging for images

inspired by https://github.com/MoonRide303/Fooocus-MRE

* feat: add config and checkbox for save_metadata_to_images

* feat: add argument disable_metadata

* feat: add support for A1111 metadata schema

cf2772fab0/modules/processing.py (L672)

* feat: add model hash support for a1111

* feat: use resolved prompts with included expansion and styles for a1111 metadata

* fix: code cleanup and resolved prompt fixes

* feat: add config metadata_created_by

* fix: use string instead of quote wrap for A1111 created_by

* fix: correctly hide/show metadata schema on app start

* fix: do not generate hashes when arg --disable-metadata is used

* refactor: rename metadata_schema to metadata_scheme

* fix: use pnginfo "parameters" instead of "Comments"

see https://github.com/RupertAvery/DiffusionToolkit/issues/202 and cf2772fab0/modules/processing.py (L939)

* feat: add resolved prompts to metadata

* fix: use correct default value in metadata check for created_by

* wip: add metadata mapping, reading and writing

applying data after reading currently not functional for A1111

* feat: rename metadata tab and import button label

* feat: map basic information for scheme A1111

* wip: optimize handling for metadata in Gradio calls

* feat: add enums for Performance, Steps and StepsUOV

also move MetadataSchema enum to prevent circular dependency

* fix: correctly map resolution, use empty styles for A1111

* chore: code cleanup

* feat: add A1111 prompt style detection

only detects one style, as Fooocus doesn't wrap {prompt} with the whole style but keeps a separate prompt string for each style

* wip: add prompt style extraction for A1111 scheme

* feat: sort styles after metadata import

* refactor: use central flag for LoRA count

* refactor: use central flag for ControlNet image count

* fix: use correct LoRA mapping, add fallback for backwards compatibility

* feat: add created_by again

* feat: add prefix "Fooocus" to version

* wip: code cleanup, update todos

* fix: use correct order to read LoRA in meta parser

* wip: code cleanup, update todos

* feat: make sha256 with length 10 default

* feat: add lora handling to A1111 scheme

* feat: override existing LoRA values when importing, as keeping them would cause images to differ

* fix: correctly extract prompt style when only prompt expansion is selected

* feat: allow model / LoRA loading from subfolders

* feat: code cleanup, do not queue metadata preview on image upload

* refactor: add flag for refiner_swap_method

* feat: add metadata handling for all non-img2img parameters

* refactor: code cleanup

* chore: use str as return type in calculate_sha256

* feat: add hash cache to metadata

* chore: code cleanup

* feat: add method get_scheme to Metadata

* fix: align handling for scheme Fooocus by removing lcm lora from json parsing

* refactor: add step before parsing to set data in parser

- add constructor for MetadataSchema class
- remove showable and copyable from log output
- add functional hash cache (model hashing takes about 5 seconds, only required once per model, using hash lazy loading)

* feat: sort metadata attributes before writing to image

* feat: add translations and hint for image prompt parameters

* chore: check and remove ToDo's

* refactor: merge metadata.py into meta_parser.py

* fix: add missing refiner in A1111 parse_json

* wip: add TODO for multiline prompt style resolution

* fix: remove sorting for A1111, change performance key position

fixes https://github.com/lllyasviel/Fooocus/pull/1940#issuecomment-1924444633

* fix: add workaround for multiline prompts

* feat: add sampler mapping

* feat: prevent config reset by renaming metadata_scheme to match config options

* chore: remove remaining todos after analysis

refiner is added when set
restoring multiline prompts has been resolved by using separate parameters "raw_prompt" and "raw_negative_prompt"

* chore: specify too broad exception types

* feat: add mapping for _gpu samplers to cpu samplers

gpu samplers are less deterministic than cpu but in general similar, see https://www.reddit.com/r/comfyui/comments/15hayzo/comment/juqcpep/

* feat: add better handling for image import with empty metadata

* fix: parse adaptive_cfg as float instead of string

* chore: loosen strict type for parse_json, fix indent

* chore: make steps enums more strict

* feat: only override steps if the metadata value is not in the steps enum, or if it is but the performance does not match

* fix: handle empty strings in metadata

e.g. raw negative prompt when none is set
2024-02-26 14:27:57 +01:00

363 lines
12 KiB
Python

import typing
import numpy as np
import datetime
import random
import math
import os
import cv2
import json

from PIL import Image
from hashlib import sha256

import modules.sdxl_styles

LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)

HASH_SHA256_LENGTH = 10


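# Morphological helper: positive k dilates the mask image, negative k erodes it,
# using a 3x3 kernel with |k| iterations.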
def erode_or_dilate(x, k):
    k = int(k)
    if k > 0:
        return cv2.dilate(x, kernel=np.ones(shape=(3, 3), dtype=np.uint8), iterations=k)
    if k < 0:
        return cv2.erode(x, kernel=np.ones(shape=(3, 3), dtype=np.uint8), iterations=-k)
    return x


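# Resize a numpy image to exactly (width, height) using LANCZOS resampling.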
def resample_image(im, width, height):
    im = Image.fromarray(im)
    im = im.resize((int(width), int(height)), resample=LANCZOS)
    return np.array(im)


def resize_image(im, width, height, resize_mode=1):
    """
    Resizes an image with the specified resize_mode, width, and height.

    Args:
        resize_mode: The mode to use when resizing the image.
            0: Resize the image to the specified width and height.
            1: Resize the image to fill the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess.
            2: Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, filling empty with data from image.
        im: The image to resize.
        width: The width to resize the image to.
        height: The height to resize the image to.
    """

    im = Image.fromarray(im)

    def resize(im, w, h):
        return im.resize((w, h), resample=LANCZOS)

    if resize_mode == 0:
        res = resize(im, width, height)

    elif resize_mode == 1:
        ratio = width / height
        src_ratio = im.width / im.height

        src_w = width if ratio > src_ratio else im.width * height // im.height
        src_h = height if ratio <= src_ratio else im.height * width // im.width

        resized = resize(im, src_w, src_h)
        res = Image.new("RGB", (width, height))
        res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))

    else:
        ratio = width / height
        src_ratio = im.width / im.height

        src_w = width if ratio < src_ratio else im.width * height // im.height
        src_h = height if ratio >= src_ratio else im.height * width // im.width

        resized = resize(im, src_w, src_h)
        res = Image.new("RGB", (width, height))
        res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))

        if ratio < src_ratio:
            fill_height = height // 2 - src_h // 2
            if fill_height > 0:
                res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
                res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
        elif ratio > src_ratio:
            fill_width = width // 2 - src_w // 2
            if fill_width > 0:
                res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
                res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))

    return np.array(res)


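# Round the mean side length sqrt(h * w) up to the next multiple of 64.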
def get_shape_ceil(h, w):
    return math.ceil(((h * w) ** 0.5) / 64.0) * 64.0


def get_image_shape_ceil(im):
    H, W = im.shape[:2]
    return get_shape_ceil(H, W)


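# Rescale the image so that its shape ceil (see get_shape_ceil) approaches the target,
# adjusting H and W in 64-pixel steps; returns the input unchanged if no resize is needed.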
def set_image_shape_ceil(im, shape_ceil):
    shape_ceil = float(shape_ceil)

    H_origin, W_origin, _ = im.shape
    H, W = H_origin, W_origin

    for _ in range(256):
        current_shape_ceil = get_shape_ceil(H, W)
        if abs(current_shape_ceil - shape_ceil) < 0.1:
            break
        k = shape_ceil / current_shape_ceil
        H = int(round(float(H) * k / 64.0) * 64)
        W = int(round(float(W) * k / 64.0) * 64)

    if H == H_origin and W == W_origin:
        return im

    return resample_image(im, width=W, height=H)


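# Normalize any uint8 image to a 3-channel (H, W, 3) array: grayscale is replicated,
# RGBA is alpha-composited over a white background.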
def HWC3(x):
    assert x.dtype == np.uint8
    if x.ndim == 2:
        x = x[:, :, None]
    assert x.ndim == 3
    H, W, C = x.shape
    assert C == 1 or C == 3 or C == 4
    if C == 3:
        return x
    if C == 1:
        return np.concatenate([x, x, x], axis=2)
    if C == 4:
        color = x[:, :, 0:3].astype(np.float32)
        alpha = x[:, :, 3:4].astype(np.float32) / 255.0
        y = color * alpha + 255.0 * (1.0 - alpha)
        y = y.clip(0, 255).astype(np.uint8)
        return y


def remove_empty_str(items, default=None):
    items = [x for x in items if x != ""]
    if len(items) == 0 and default is not None:
        return [default]
    return items


def join_prompts(*args, **kwargs):
    prompts = [str(x) for x in args if str(x) != ""]
    if len(prompts) == 0:
        return ""
    if len(prompts) == 1:
        return prompts[0]
    return ', '.join(prompts)


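# Build an output path of the form <folder>/<YYYY-MM-DD>/<timestamp>_<random>.<extension>
# and return (date string, absolute path, filename).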
def generate_temp_filename(folder='./outputs/', extension='png'):
    current_time = datetime.datetime.now()
    date_string = current_time.strftime("%Y-%m-%d")
    time_string = current_time.strftime("%Y-%m-%d_%H-%M-%S")
    random_number = random.randint(1000, 9999)
    filename = f"{time_string}_{random_number}.{extension}"
    result = os.path.join(folder, date_string, filename)
    return date_string, os.path.abspath(os.path.realpath(result)), filename


def get_files_from_folder(folder_path, exensions=None, name_filter=None):
    if not os.path.isdir(folder_path):
        raise ValueError("Folder path is not a valid directory.")

    filenames = []

    for root, dirs, files in os.walk(folder_path, topdown=False):
        relative_path = os.path.relpath(root, folder_path)
        if relative_path == ".":
            relative_path = ""
        for filename in sorted(files, key=lambda s: s.casefold()):
            _, file_extension = os.path.splitext(filename)
            if (exensions is None or file_extension.lower() in exensions) and (name_filter is None or name_filter in _):
                path = os.path.join(relative_path, filename)
                filenames.append(path)

    return filenames


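# Hash a file by streaming it in 1 MiB chunks; the digest is truncated to `length`
# hex characters (10 by default), or returned in full when length is falsy.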
def calculate_sha256(filename, length=HASH_SHA256_LENGTH) -> str:
    hash_sha256 = sha256()
    blksize = 1024 * 1024

    with open(filename, "rb") as f:
        for chunk in iter(lambda: f.read(blksize), b""):
            hash_sha256.update(chunk)

    res = hash_sha256.hexdigest()
    return res[:length] if length else res


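# Values containing a comma, newline or colon are JSON-encoded so they survive embedding
# in the comma-separated metadata "parameters" string; unquote reverses this.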
def quote(text):
    if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
        return text
    return json.dumps(text, ensure_ascii=False)


def unquote(text):
    if len(text) == 0 or text[0] != '"' or text[-1] != '"':
        return text

    try:
        return json.loads(text)
    except Exception:
        return text


def unwrap_style_text_from_prompt(style_text, prompt):
    """
    Checks the prompt to see if the style text is wrapped around it. If so,
    returns True plus the prompt text without the style text. Otherwise, returns
    False with the original prompt.

    Note that the "cleaned" version of the style text is only used for matching
    purposes here. It isn't returned; the original style text is not modified.
    """
    stripped_prompt = prompt
    stripped_style_text = style_text
    if "{prompt}" in stripped_style_text:
        # Work out whether the prompt is wrapped in the style text. If so, we
        # return True and the "inner" prompt text that isn't part of the style.
        try:
            left, right = stripped_style_text.split("{prompt}", 2)
        except ValueError as e:
            # If the style text has multiple "{prompt}"s, we can't split it into
            # two parts. This is an error, but we can't do anything about it.
            print(f"Unable to compare style text to prompt:\n{style_text}")
            print(f"Error: {e}")
            return False, prompt, ''

        left_pos = stripped_prompt.find(left)
        right_pos = stripped_prompt.find(right)
        if 0 <= left_pos < right_pos:
            real_prompt = stripped_prompt[left_pos + len(left):right_pos]
            prompt = stripped_prompt.replace(left + real_prompt + right, '', 1)
            if prompt.startswith(", "):
                prompt = prompt[2:]
            if prompt.endswith(", "):
                prompt = prompt[:-2]
            return True, prompt, real_prompt
    else:
        # Work out whether the given prompt starts with the style text. If so, we
        # return True and the prompt text up to where the style text starts.
        if stripped_prompt.endswith(stripped_style_text):
            prompt = stripped_prompt[: len(stripped_prompt) - len(stripped_style_text)]
            if prompt.endswith(", "):
                prompt = prompt[:-2]
            return True, prompt, prompt

    return False, prompt, ''


def extract_original_prompts(style, prompt, negative_prompt):
    """
    Takes a style and compares it to the prompt and negative prompt. If the style
    matches, returns True plus the prompt and negative prompt with the style text
    removed. Otherwise, returns False with the original prompt and negative prompt.
    """
    if not style.prompt and not style.negative_prompt:
        return False, prompt, negative_prompt, ''

    match_positive, extracted_positive, real_prompt = unwrap_style_text_from_prompt(
        style.prompt, prompt
    )
    if not match_positive:
        return False, prompt, negative_prompt, ''

    match_negative, extracted_negative, _ = unwrap_style_text_from_prompt(
        style.negative_prompt, negative_prompt
    )
    if not match_negative:
        return False, prompt, negative_prompt, ''

    return True, extracted_positive, extracted_negative, real_prompt


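# Greedily strip every matching style from the prompt pair; returns the detected style
# names, the remaining "real" prompt and the remaining negative prompt. The Fooocus
# prompt-expansion style is appended when leftover prompt text cannot be matched to a style.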
def extract_styles_from_prompt(prompt, negative_prompt):
    extracted = []
    applicable_styles = []

    for style_name, (style_prompt, style_negative_prompt) in modules.sdxl_styles.styles.items():
        applicable_styles.append(PromptStyle(name=style_name, prompt=style_prompt, negative_prompt=style_negative_prompt))

    real_prompt = ''

    while True:
        found_style = None

        for style in applicable_styles:
            is_match, new_prompt, new_neg_prompt, new_real_prompt = extract_original_prompts(
                style, prompt, negative_prompt
            )
            if is_match:
                found_style = style
                prompt = new_prompt
                negative_prompt = new_neg_prompt
                if real_prompt == '' and new_real_prompt != '' and new_real_prompt != prompt:
                    real_prompt = new_real_prompt
                break

        if not found_style:
            break

        applicable_styles.remove(found_style)
        extracted.append(found_style.name)

    # add prompt expansion if not all styles could be resolved
    if prompt != '':
        if real_prompt != '':
            extracted.append(modules.sdxl_styles.fooocus_expansion)
        else:
            # find real_prompt when only prompt expansion is selected
            first_word = prompt.split(', ')[0]
            first_word_positions = [i for i in range(len(prompt)) if prompt.startswith(first_word, i)]
            if len(first_word_positions) > 1:
                real_prompt = prompt[:first_word_positions[-1]]
                extracted.append(modules.sdxl_styles.fooocus_expansion)
                if real_prompt.endswith(', '):
                    real_prompt = real_prompt[:-2]

    return list(reversed(extracted)), real_prompt, negative_prompt


class PromptStyle(typing.NamedTuple):
    name: str
    prompt: str
    negative_prompt: str


def is_json(data: str) -> bool:
    try:
        loaded_json = json.loads(data)
        assert isinstance(loaded_json, dict)
    except (ValueError, AssertionError):
        return False

    return True


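# Search the given folders in order and return the first existing file; if none exists,
# fall back to the (possibly non-existent) path under the first folder.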
def get_file_from_folder_list(name, folders):
    for folder in folders:
        filename = os.path.abspath(os.path.realpath(os.path.join(folder, name)))
        if os.path.isfile(filename):
            return filename

    return os.path.abspath(os.path.realpath(os.path.join(folders[0], name)))


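# English ordinal suffix for a number: 1 -> 'st', 2 -> 'nd', 3 -> 'rd', otherwise 'th',
# with 11-13 correctly mapped to 'th'.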
def ordinal_suffix(number: int) -> str:
    return 'th' if 10 <= number % 100 <= 20 else {1: 'st', 2: 'nd', 3: 'rd'}.get(number % 10, 'th')


def makedirs_with_log(path):
    try:
        os.makedirs(path, exist_ok=True)
    except OSError as error:
        print(f'Directory {path} could not be created, reason: {error}')