* feat: add metadata logging for images, inspired by https://github.com/MoonRide303/Fooocus-MRE
* feat: add config and checkbox for save_metadata_to_images
* feat: add argument disable_metadata
* feat: add support for A1111 metadata schema (cf2772fab0/modules/processing.py (L672))
* feat: add model hash support for A1111
* feat: use resolved prompts with included expansion and styles for A1111 metadata
* fix: code cleanup and resolved prompt fixes
* feat: add config metadata_created_by
* fix: use string instead of quote wrap for A1111 created_by
* fix: correctly hide/show metadata schema on app start
* fix: do not generate hashes when arg --disable-metadata is used
* refactor: rename metadata_schema to metadata_scheme
* fix: use pnginfo "parameters" instead of "Comments", see https://github.com/RupertAvery/DiffusionToolkit/issues/202 and cf2772fab0/modules/processing.py (L939)
* feat: add resolved prompts to metadata
* fix: use correct default value in metadata check for created_by
* wip: add metadata mapping, reading and writing (applying data after reading currently not functional for A1111)
* feat: rename metadata tab and import button label
* feat: map basic information for scheme A1111
* wip: optimize handling for metadata in Gradio calls
* feat: add enums for Performance, Steps and StepsUOV; also move MetadataSchema enum to prevent a circular dependency
* fix: correctly map resolution, use empty styles for A1111
* chore: code cleanup
* feat: add A1111 prompt style detection (only detects one style, as Fooocus doesn't wrap {prompt} with the whole style but keeps a separate prompt string for each style)
* wip: add prompt style extraction for A1111 scheme
* feat: sort styles after metadata import
* refactor: use central flag for LoRA count
* refactor: use central flag for ControlNet image count
* fix: use correct LoRA mapping, add fallback for backwards compatibility
* feat: add created_by again
* feat: add prefix "Fooocus" to version
* wip: code cleanup, update todos
* fix: use correct order to read LoRA in meta parser
* wip: code cleanup, update todos
* feat: make sha256 with length 10 the default
* feat: add LoRA handling to A1111 scheme
* feat: override existing LoRA values when importing, as they would otherwise cause images to differ
* fix: correctly extract prompt style when only prompt expansion is selected
* feat: allow model / LoRA loading from subfolders
* feat: code cleanup, do not queue metadata preview on image upload
* refactor: add flag for refiner_swap_method
* feat: add metadata handling for all non-img2img parameters
* refactor: code cleanup
* chore: use str as return type in calculate_sha256
* feat: add hash cache to metadata
* chore: code cleanup
* feat: add method get_scheme to Metadata
* fix: align handling for scheme Fooocus by removing lcm lora from json parsing
* refactor: add step before parsing to set data in parser (add constructor for MetadataSchema class; remove showable and copyable from log output; add functional hash cache: model hashing takes about 5 seconds, is only required once per model, and uses lazy loading)
* feat: sort metadata attributes before writing to image
* feat: add translations and hint for image prompt parameters
* chore: check and remove ToDo's
* refactor: merge metadata.py into meta_parser.py
* fix: add missing refiner in A1111 parse_json
* wip: add TODO for multiline prompt style resolution
* fix: remove sorting for A1111, change performance key position (fixes https://github.com/lllyasviel/Fooocus/pull/1940#issuecomment-1924444633)
* fix: add workaround for multiline prompts
* feat: add sampler mapping
* feat: prevent config reset by renaming metadata_scheme to match config options
* chore: remove remaining todos after analysis (refiner is added when set; restoring multiline prompts has been resolved by using separate parameters "raw_prompt" and "raw_negative_prompt")
* chore: specify too broad exception types
* feat: add mapping for _gpu samplers to cpu samplers (gpu samplers are less deterministic than cpu but in general similar, see https://www.reddit.com/r/comfyui/comments/15hayzo/comment/juqcpep/)
* feat: add better handling for image import with empty metadata
* fix: parse adaptive_cfg as float instead of string
* chore: loosen strict type for parse_json, fix indent
* chore: make steps enums more strict
* feat: only override steps if the metadata value is not in the steps enum, or it is in the enum but the performance does not match
* fix: handle empty strings in metadata, e.g. raw negative prompt when none is set
import json
import os
import re
from abc import ABC, abstractmethod
from pathlib import Path

import gradio as gr
from PIL import Image

import modules.config
import modules.sdxl_styles
from modules.flags import MetadataScheme, Performance, Steps
from modules.flags import SAMPLERS, CIVITAI_NO_KARRAS
from modules.util import quote, unquote, extract_styles_from_prompt, is_json, get_file_from_folder_list, calculate_sha256

re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)'
re_param = re.compile(re_param_code)
re_imagesize = re.compile(r"^(\d+)x(\d+)$")
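

# Illustrative example (not part of the original module): re_param tokenizes the
# comma-separated "key: value" pairs on the last line of an A1111 info string.
# Quoted values may contain escaped quotes, so commas inside quotes survive.
def _example_re_param_usage():
    line = 'Steps: 30, Sampler: DPM++ 2M Karras, CFG scale: 4.0, Seed: 12345, Size: 1152x896'
    return re_param.findall(line)
    # -> [('Steps', '30'), ('Sampler', 'DPM++ 2M Karras'), ('CFG scale', '4.0'),
    #     ('Seed', '12345'), ('Size', '1152x896')]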


hash_cache = {}


def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
    loaded_parameter_dict = raw_metadata
    if isinstance(raw_metadata, str):
        loaded_parameter_dict = json.loads(raw_metadata)
    assert isinstance(loaded_parameter_dict, dict)

    results = [len(loaded_parameter_dict) > 0, 1]

    get_str('prompt', 'Prompt', loaded_parameter_dict, results)
    get_str('negative_prompt', 'Negative Prompt', loaded_parameter_dict, results)
    get_list('styles', 'Styles', loaded_parameter_dict, results)
    get_str('performance', 'Performance', loaded_parameter_dict, results)
    get_steps('steps', 'Steps', loaded_parameter_dict, results)
    get_float('overwrite_switch', 'Overwrite Switch', loaded_parameter_dict, results)
    get_resolution('resolution', 'Resolution', loaded_parameter_dict, results)
    get_float('guidance_scale', 'Guidance Scale', loaded_parameter_dict, results)
    get_float('sharpness', 'Sharpness', loaded_parameter_dict, results)
    get_adm_guidance('adm_guidance', 'ADM Guidance', loaded_parameter_dict, results)
    get_str('refiner_swap_method', 'Refiner Swap Method', loaded_parameter_dict, results)
    get_float('adaptive_cfg', 'CFG Mimicking from TSNR', loaded_parameter_dict, results)
    get_str('base_model', 'Base Model', loaded_parameter_dict, results)
    get_str('refiner_model', 'Refiner Model', loaded_parameter_dict, results)
    get_float('refiner_switch', 'Refiner Switch', loaded_parameter_dict, results)
    get_str('sampler', 'Sampler', loaded_parameter_dict, results)
    get_str('scheduler', 'Scheduler', loaded_parameter_dict, results)
    get_seed('seed', 'Seed', loaded_parameter_dict, results)

    if is_generating:
        results.append(gr.update())
    else:
        results.append(gr.update(visible=True))

    results.append(gr.update(visible=False))

    get_freeu('freeu', 'FreeU', loaded_parameter_dict, results)

    for i in range(modules.config.default_max_lora_number):
        get_lora(f'lora_combined_{i + 1}', f'LoRA {i + 1}', loaded_parameter_dict, results)

    return results
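

# Note (added for clarity, not in the original): load_parameter_button_click
# returns a flat list whose order must match the Gradio output components wired
# to this handler in the UI: two header values, then one entry per scalar
# control, three entries for resolution (aspect ratio, width, height), two for
# seed (random-seed checkbox state plus the seed itself), three for ADM
# guidance, two button-visibility updates, five for FreeU (enabled plus four
# floats), and three per LoRA slot (enabled, name, weight). The helpers below
# append gr.update() to leave a control unchanged when a value is missing or
# cannot be parsed.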


def get_str(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
    try:
        h = source_dict.get(key, source_dict.get(fallback, default))
        assert isinstance(h, str)
        results.append(h)
    except:
        results.append(gr.update())


def get_list(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
    try:
        h = source_dict.get(key, source_dict.get(fallback, default))
        h = eval(h)
        assert isinstance(h, list)
        results.append(h)
    except:
        results.append(gr.update())


def get_float(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
    try:
        h = source_dict.get(key, source_dict.get(fallback, default))
        assert h is not None
        h = float(h)
        results.append(h)
    except:
        results.append(gr.update())


def get_steps(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
    try:
        h = source_dict.get(key, source_dict.get(fallback, default))
        assert h is not None
        h = int(h)
        # only override steps if the value is not in the Steps enum,
        # or if it is but the performance name does not match
        if h not in iter(Steps) or Steps(h).name.casefold() != source_dict.get('performance', '').replace(' ', '_').casefold():
            results.append(h)
            return
        results.append(-1)
    except:
        results.append(-1)
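

# Illustrative sketch (assumes the Steps and Performance enums from
# modules.flags share member names, e.g. Steps.SPEED == 30 and
# Performance.SPEED == 'Speed'): get_steps() appends -1 when the stored step
# count is simply the default for the stored performance preset, so the import
# keeps the preset's own step count instead of overriding it.
def _example_get_steps():
    results = []
    get_steps('steps', 'Steps', {'steps': '30', 'performance': 'Speed'}, results)
    # -> results == [-1]  (30 is the default for Speed; do not override)
    get_steps('steps', 'Steps', {'steps': '25', 'performance': 'Speed'}, results)
    # -> results == [-1, 25]  (custom step count; override)
    return results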


def get_resolution(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
    try:
        h = source_dict.get(key, source_dict.get(fallback, default))
        width, height = eval(h)
        formatted = modules.config.add_ratio(f'{width}*{height}')
        if formatted in modules.config.available_aspect_ratios:
            results.append(formatted)
            results.append(-1)
            results.append(-1)
        else:
            results.append(gr.update())
            results.append(width)
            results.append(height)
    except:
        results.append(gr.update())
        results.append(gr.update())
        results.append(gr.update())


def get_seed(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
    try:
        h = source_dict.get(key, source_dict.get(fallback, default))
        assert h is not None
        h = int(h)
        results.append(False)
        results.append(h)
    except:
        results.append(gr.update())
        results.append(gr.update())


def get_adm_guidance(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
    try:
        h = source_dict.get(key, source_dict.get(fallback, default))
        p, n, e = eval(h)
        results.append(float(p))
        results.append(float(n))
        results.append(float(e))
    except:
        results.append(gr.update())
        results.append(gr.update())
        results.append(gr.update())


def get_freeu(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
    try:
        h = source_dict.get(key, source_dict.get(fallback, default))
        b1, b2, s1, s2 = eval(h)
        results.append(True)
        results.append(float(b1))
        results.append(float(b2))
        results.append(float(s1))
        results.append(float(s2))
    except:
        results.append(False)
        results.append(gr.update())
        results.append(gr.update())
        results.append(gr.update())
        results.append(gr.update())


def get_lora(key: str, fallback: str | None, source_dict: dict, results: list):
    try:
        n, w = source_dict.get(key, source_dict.get(fallback)).split(' : ')
        w = float(w)
        results.append(True)
        results.append(n)
        results.append(w)
    except:
        results.append(True)
        results.append('None')
        results.append(1)
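

# Format note (added for illustration, the LoRA name is hypothetical):
# 'lora_combined_N' values are stored as '<name> : <weight>'.
def _example_get_lora():
    results = []
    get_lora('lora_combined_1', 'LoRA 1', {'lora_combined_1': 'detail_tweaker : 0.5'}, results)
    return results
    # -> [True, 'detail_tweaker', 0.5]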


def get_sha256(filepath):
    global hash_cache
    if filepath not in hash_cache:
        hash_cache[filepath] = calculate_sha256(filepath)
    return hash_cache[filepath]
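

# Usage note (hypothetical path, added for clarity): hashing a multi-gigabyte
# checkpoint takes several seconds, so the result is cached per file path and
# each model is hashed at most once per session.
#
#   h = get_sha256('checkpoints/model.safetensors')  # slow: computes the hash
#   h = get_sha256('checkpoints/model.safetensors')  # fast: served from hash_cache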


class MetadataParser(ABC):
    def __init__(self):
        self.raw_prompt: str = ''
        self.full_prompt: str = ''
        self.raw_negative_prompt: str = ''
        self.full_negative_prompt: str = ''
        self.steps: int = 30
        self.base_model_name: str = ''
        self.base_model_hash: str = ''
        self.refiner_model_name: str = ''
        self.refiner_model_hash: str = ''
        self.loras: list = []

    @abstractmethod
    def get_scheme(self) -> MetadataScheme:
        raise NotImplementedError

    @abstractmethod
    def parse_json(self, metadata: dict | str) -> dict:
        raise NotImplementedError

    @abstractmethod
    def parse_string(self, metadata: dict) -> str:
        raise NotImplementedError

    def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name, refiner_model_name, loras):
        self.raw_prompt = raw_prompt
        self.full_prompt = full_prompt
        self.raw_negative_prompt = raw_negative_prompt
        self.full_negative_prompt = full_negative_prompt
        self.steps = steps
        self.base_model_name = Path(base_model_name).stem

        base_model_path = get_file_from_folder_list(base_model_name, modules.config.paths_checkpoints)
        self.base_model_hash = get_sha256(base_model_path)

        if refiner_model_name not in ['', 'None']:
            self.refiner_model_name = Path(refiner_model_name).stem
            refiner_model_path = get_file_from_folder_list(refiner_model_name, modules.config.paths_checkpoints)
            self.refiner_model_hash = get_sha256(refiner_model_path)

        self.loras = []
        for (lora_name, lora_weight) in loras:
            if lora_name != 'None':
                lora_path = get_file_from_folder_list(lora_name, modules.config.paths_loras)
                lora_hash = get_sha256(lora_path)
                self.loras.append((Path(lora_name).stem, lora_weight, lora_hash))
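

# Usage sketch (file names are hypothetical): a parser is created via
# get_metadata_parser() below, fed the render parameters once per image through
# set_data(), and then asked to serialize them. Model and LoRA hashes are
# resolved lazily through get_sha256(), so only the first image per model pays
# the hashing cost.
#
#   parser = get_metadata_parser(MetadataScheme.A1111)
#   parser.set_data(raw_prompt, full_prompt, raw_negative_prompt,
#                   full_negative_prompt, steps=30,
#                   base_model_name='sdxl_base.safetensors',
#                   refiner_model_name='None',
#                   loras=[('detail_tweaker.safetensors', 0.5)])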


class A1111MetadataParser(MetadataParser):
    def get_scheme(self) -> MetadataScheme:
        return MetadataScheme.A1111

    fooocus_to_a1111 = {
        'raw_prompt': 'Raw prompt',
        'raw_negative_prompt': 'Raw negative prompt',
        'negative_prompt': 'Negative prompt',
        'styles': 'Styles',
        'performance': 'Performance',
        'steps': 'Steps',
        'sampler': 'Sampler',
        'scheduler': 'Scheduler',
        'guidance_scale': 'CFG scale',
        'seed': 'Seed',
        'resolution': 'Size',
        'sharpness': 'Sharpness',
        'adm_guidance': 'ADM Guidance',
        'refiner_swap_method': 'Refiner Swap Method',
        'adaptive_cfg': 'Adaptive CFG',
        'overwrite_switch': 'Overwrite Switch',
        'freeu': 'FreeU',
        'base_model': 'Model',
        'base_model_hash': 'Model hash',
        'refiner_model': 'Refiner',
        'refiner_model_hash': 'Refiner hash',
        'lora_hashes': 'Lora hashes',
        'lora_weights': 'Lora weights',
        'created_by': 'User',
        'version': 'Version'
    }
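
    # Note (added for clarity): fooocus_to_a1111 maps internal parameter names
    # to A1111 labels. parse_json below inverts it with
    # list(keys)[list(values).index(label)]; a sketch of the same lookup:
    #
    #   label = 'CFG scale'
    #   keys, values = list(fooocus_to_a1111.keys()), list(fooocus_to_a1111.values())
    #   keys[values.index(label)]  # -> 'guidance_scale'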

    def parse_json(self, metadata: str) -> dict:
        metadata_prompt = ''
        metadata_negative_prompt = ''

        done_with_prompt = False

        *lines, lastline = metadata.strip().split("\n")
        if len(re_param.findall(lastline)) < 3:
            lines.append(lastline)
            lastline = ''

        for line in lines:
            line = line.strip()
            if line.startswith(f"{self.fooocus_to_a1111['negative_prompt']}:"):
                done_with_prompt = True
                line = line[len(f"{self.fooocus_to_a1111['negative_prompt']}:"):].strip()
            if done_with_prompt:
                metadata_negative_prompt += ('' if metadata_negative_prompt == '' else "\n") + line
            else:
                metadata_prompt += ('' if metadata_prompt == '' else "\n") + line

        found_styles, prompt, negative_prompt = extract_styles_from_prompt(metadata_prompt, metadata_negative_prompt)

        data = {
            'prompt': prompt,
            'negative_prompt': negative_prompt
        }

        for k, v in re_param.findall(lastline):
            try:
                if v != '' and v[0] == '"' and v[-1] == '"':
                    v = unquote(v)

                m = re_imagesize.match(v)
                if m is not None:
                    data['resolution'] = str((m.group(1), m.group(2)))
                else:
                    data[list(self.fooocus_to_a1111.keys())[list(self.fooocus_to_a1111.values()).index(k)]] = v
            except Exception:
                print(f"Error parsing \"{k}: {v}\"")

        # workaround for multiline prompts
        if 'raw_prompt' in data:
            data['prompt'] = data['raw_prompt']
            raw_prompt = data['raw_prompt'].replace("\n", ', ')
            if metadata_prompt != raw_prompt and modules.sdxl_styles.fooocus_expansion not in found_styles:
                found_styles.append(modules.sdxl_styles.fooocus_expansion)

        if 'raw_negative_prompt' in data:
            data['negative_prompt'] = data['raw_negative_prompt']

        data['styles'] = str(found_styles)

        # try to load performance based on steps, fallback for direct A1111 imports
        if 'steps' in data and 'performance' not in data:
            try:
                data['performance'] = Performance[Steps(int(data['steps'])).name].value
            except (ValueError, KeyError):
                pass

        if 'sampler' in data:
            data['sampler'] = data['sampler'].replace(' Karras', '')
            # map the A1111 sampler label back to its Fooocus key
            for k, v in SAMPLERS.items():
                if v == data['sampler']:
                    data['sampler'] = k
                    break

        for key in ['base_model', 'refiner_model']:
            if key in data:
                for filename in modules.config.model_filenames:
                    path = Path(filename)
                    if data[key] == path.stem:
                        data[key] = filename
                        break

        if 'lora_hashes' in data:
            lora_filenames = modules.config.lora_filenames.copy()
            lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora())
            for li, lora in enumerate(data['lora_hashes'].split(', ')):
                lora_name, lora_hash, lora_weight = lora.split(': ')
                for filename in lora_filenames:
                    path = Path(filename)
                    if lora_name == path.stem:
                        data[f'lora_combined_{li + 1}'] = f'{filename} : {lora_weight}'
                        break

        return data
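
    # Illustrative input (abbreviated, values are hypothetical) for parse_json:
    # prompt lines come first, then an optional "Negative prompt:" block, and
    # the last line holds the comma-separated parameters that re_param
    # tokenizes:
    #
    #   cinematic photo of a cat
    #   Negative prompt: blurry, low quality
    #   Steps: 30, Sampler: DPM++ 2M SDE Karras, CFG scale: 4.0, Seed: 12345,
    #   Size: 1152x896, Model: sdxl_base, Version: Fooocus v2.1.861
    #
    # (In a real info string the parameters form a single line; they are
    # wrapped here for readability.)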

    def parse_string(self, metadata: dict) -> str:
        data = {k: v for _, k, v in metadata}

        width, height = eval(data['resolution'])

        sampler = data['sampler']
        scheduler = data['scheduler']
        if sampler in SAMPLERS and SAMPLERS[sampler] != '':
            sampler = SAMPLERS[sampler]
            if sampler not in CIVITAI_NO_KARRAS and scheduler == 'karras':
                sampler += ' Karras'

        generation_params = {
            self.fooocus_to_a1111['steps']: self.steps,
            self.fooocus_to_a1111['sampler']: sampler,
            self.fooocus_to_a1111['seed']: data['seed'],
            self.fooocus_to_a1111['resolution']: f'{width}x{height}',
            self.fooocus_to_a1111['guidance_scale']: data['guidance_scale'],
            self.fooocus_to_a1111['sharpness']: data['sharpness'],
            self.fooocus_to_a1111['adm_guidance']: data['adm_guidance'],
            self.fooocus_to_a1111['base_model']: Path(data['base_model']).stem,
            self.fooocus_to_a1111['base_model_hash']: self.base_model_hash,

            self.fooocus_to_a1111['performance']: data['performance'],
            self.fooocus_to_a1111['scheduler']: scheduler,
            # workaround for multiline prompts
            self.fooocus_to_a1111['raw_prompt']: self.raw_prompt,
            self.fooocus_to_a1111['raw_negative_prompt']: self.raw_negative_prompt,
        }

        if self.refiner_model_name not in ['', 'None']:
            generation_params |= {
                self.fooocus_to_a1111['refiner_model']: self.refiner_model_name,
                self.fooocus_to_a1111['refiner_model_hash']: self.refiner_model_hash
            }

        for key in ['adaptive_cfg', 'overwrite_switch', 'refiner_swap_method', 'freeu']:
            if key in data:
                generation_params[self.fooocus_to_a1111[key]] = data[key]

        lora_hashes = []
        for index, (lora_name, lora_weight, lora_hash) in enumerate(self.loras):
            # workaround for Fooocus not knowing the LoRA name in LoRA metadata
            lora_hashes.append(f'{lora_name}: {lora_hash}: {lora_weight}')
        lora_hashes_string = ', '.join(lora_hashes)

        generation_params |= {
            self.fooocus_to_a1111['lora_hashes']: lora_hashes_string,
            self.fooocus_to_a1111['version']: data['version']
        }

        if modules.config.metadata_created_by != '':
            generation_params[self.fooocus_to_a1111['created_by']] = modules.config.metadata_created_by

        generation_params_text = ", ".join(
            [k if k == v else f'{k}: {quote(v)}' for k, v in generation_params.items() if v is not None])
        positive_prompt_resolved = ', '.join(self.full_prompt)
        negative_prompt_resolved = ', '.join(self.full_negative_prompt)
        negative_prompt_text = f"\nNegative prompt: {negative_prompt_resolved}" if negative_prompt_resolved else ""
        return f"{positive_prompt_resolved}{negative_prompt_text}\n{generation_params_text}".strip()


class FooocusMetadataParser(MetadataParser):
    def get_scheme(self) -> MetadataScheme:
        return MetadataScheme.FOOOCUS

    def parse_json(self, metadata: dict) -> dict:
        model_filenames = modules.config.model_filenames.copy()
        lora_filenames = modules.config.lora_filenames.copy()
        lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora())

        for key, value in metadata.items():
            if value in ['', 'None']:
                continue
            if key in ['base_model', 'refiner_model']:
                metadata[key] = self.replace_value_with_filename(key, value, model_filenames)
            elif key.startswith('lora_combined_'):
                metadata[key] = self.replace_value_with_filename(key, value, lora_filenames)
            else:
                continue

        return metadata

    def parse_string(self, metadata: list) -> str:
        for li, (label, key, value) in enumerate(metadata):
            # remove model folder paths from metadata
            if key.startswith('lora_combined_'):
                name, weight = value.split(' : ')
                name = Path(name).stem
                value = f'{name} : {weight}'
                metadata[li] = (label, key, value)

        res = {k: v for _, k, v in metadata}

        res['full_prompt'] = self.full_prompt
        res['full_negative_prompt'] = self.full_negative_prompt
        res['steps'] = self.steps
        res['base_model'] = self.base_model_name
        res['base_model_hash'] = self.base_model_hash

        if self.refiner_model_name not in ['', 'None']:
            res['refiner_model'] = self.refiner_model_name
            res['refiner_model_hash'] = self.refiner_model_hash

        res['loras'] = self.loras

        if modules.config.metadata_created_by != '':
            res['created_by'] = modules.config.metadata_created_by

        return json.dumps(dict(sorted(res.items())))

    @staticmethod
    def replace_value_with_filename(key, value, filenames):
        for filename in filenames:
            path = Path(filename)
            if key.startswith('lora_combined_'):
                name, weight = value.split(' : ')
                if name == path.stem:
                    return f'{filename} : {weight}'
            elif value == path.stem:
                return filename
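

# Illustrative example (hypothetical filenames): replace_value_with_filename
# restores the full relative path, including subfolders, from the bare stem
# stored in the metadata. It returns None when no known file matches.
#
#   FooocusMetadataParser.replace_value_with_filename(
#       'lora_combined_1', 'detail_tweaker : 0.5',
#       ['subfolder/detail_tweaker.safetensors'])
#   # -> 'subfolder/detail_tweaker.safetensors : 0.5'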


def get_metadata_parser(metadata_scheme: MetadataScheme) -> MetadataParser:
    match metadata_scheme:
        case MetadataScheme.FOOOCUS:
            return FooocusMetadataParser()
        case MetadataScheme.A1111:
            return A1111MetadataParser()
        case _:
            raise NotImplementedError


def read_info_from_image(filepath) -> tuple[str | None, dict, MetadataScheme | None]:
    with Image.open(filepath) as image:
        items = (image.info or {}).copy()

    parameters = items.pop('parameters', None)
    if parameters is not None and is_json(parameters):
        parameters = json.loads(parameters)

    try:
        metadata_scheme = MetadataScheme(items.pop('fooocus_scheme', None))
    except ValueError:
        metadata_scheme = None

    # broad fallback: infer the scheme from the parameter type when the
    # explicit 'fooocus_scheme' key is missing or invalid
    if metadata_scheme is None:
        if isinstance(parameters, dict):
            metadata_scheme = MetadataScheme.FOOOCUS

        if isinstance(parameters, str):
            metadata_scheme = MetadataScheme.A1111

    return parameters, items, metadata_scheme
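

# End-to-end usage sketch (hypothetical file path, added for illustration):
# read an image's embedded metadata, pick the matching parser, and normalize
# the parameters for the UI.
def _example_read_and_parse(filepath='outputs/2024-01-01/sample.png'):
    parameters, items, metadata_scheme = read_info_from_image(filepath)
    if parameters is None or metadata_scheme is None:
        return None
    parser = get_metadata_parser(metadata_scheme)
    # Fooocus images carry a JSON dict, A1111 images a plain info string;
    # both parser variants return a normalized dict of Fooocus parameters.
    return parser.parse_json(parameters)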