* feat: add metadata logging for images, inspired by https://github.com/MoonRide303/Fooocus-MRE
* feat: add config and checkbox for save_metadata_to_images
* feat: add argument disable_metadata
* feat: add support for A1111 metadata schema, see cf2772fab0/modules/processing.py (L672)
* feat: add model hash support for a1111
* feat: use resolved prompts with included expansion and styles for a1111 metadata
* fix: code cleanup and resolved prompt fixes
* feat: add config metadata_created_by
* fix: use string instead of quote wrap for A1111 created_by
* fix: correctly hide/show metadata schema on app start
* fix: do not generate hashes when arg --disable-metadata is used
* refactor: rename metadata_schema to metadata_scheme
* fix: use pnginfo "parameters" instead of "Comments", see https://github.com/RupertAvery/DiffusionToolkit/issues/202 and cf2772fab0/modules/processing.py (L939)
* feat: add resolved prompts to metadata
* fix: use correct default value in metadata check for created_by
* wip: add metadata mapping, reading and writing; applying data after reading is currently not functional for A1111
* feat: rename metadata tab and import button label
* feat: map basic information for scheme A1111
* wip: optimize handling for metadata in Gradio calls
* feat: add enums for Performance, Steps and StepsUOV; also move MetadataSchema enum to prevent a circular dependency
* fix: correctly map resolution, use empty styles for A1111
* chore: code cleanup
* feat: add A1111 prompt style detection; only one style is detected, as Fooocus doesn't wrap {prompt} with the whole style but keeps a separate prompt string for each style
* wip: add prompt style extraction for A1111 scheme
* feat: sort styles after metadata import
* refactor: use central flag for LoRA count
* refactor: use central flag for ControlNet image count
* fix: use correct LoRA mapping, add fallback for backwards compatibility
* feat: add created_by again
* feat: add prefix "Fooocus" to version
* wip: code cleanup, update todos
* fix: use correct order to read LoRA in meta parser
* wip: code cleanup, update todos
* feat: make sha256 with length 10 the default
* feat: add LoRA handling to A1111 scheme
* feat: override existing LoRA values when importing, otherwise images would differ
* fix: correctly extract prompt style when only prompt expansion is selected
* feat: allow model / LoRA loading from subfolders
* feat: code cleanup, do not queue metadata preview on image upload
* refactor: add flag for refiner_swap_method
* feat: add metadata handling for all non-img2img parameters
* refactor: code cleanup
* chore: use str as return type in calculate_sha256
* feat: add hash cache to metadata
* chore: code cleanup
* feat: add method get_scheme to Metadata
* fix: align handling for scheme Fooocus by removing lcm lora from json parsing
* refactor: add step before parsing to set data in parser: add constructor for MetadataSchema class, remove showable and copyable from log output, add functional hash cache (model hashing takes about 5 seconds, only required once per model, using hash lazy loading)
* feat: sort metadata attributes before writing to image
* feat: add translations and hint for image prompt parameters
* chore: check and remove ToDo's
* refactor: merge metadata.py into meta_parser.py
* fix: add missing refiner in A1111 parse_json
* wip: add TODO for multiline prompt style resolution
* fix: remove sorting for A1111, change performance key position; fixes https://github.com/lllyasviel/Fooocus/pull/1940#issuecomment-1924444633
* fix: add workaround for multiline prompts
* feat: add sampler mapping
* feat: prevent config reset by renaming metadata_scheme to match config options
* chore: remove remaining todos after analysis; refiner is added when set, and restoring multiline prompts has been resolved by using separate parameters "raw_prompt" and "raw_negative_prompt"
* chore: specify too broad exception types
* feat: add mapping for _gpu samplers to cpu samplers; gpu samplers are less deterministic than cpu but in general similar, see https://www.reddit.com/r/comfyui/comments/15hayzo/comment/juqcpep/
* feat: add better handling for image import with empty metadata
* fix: parse adaptive_cfg as float instead of string
* chore: loosen strict type for parse_json, fix indent
* chore: make steps enums more strict
* feat: only override steps if the metadata value is not in the steps enum, or is in the steps enum but the performance is not the same
* fix: handle empty strings in metadata, e.g. raw negative prompt when none is set
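
The two metadata mechanics referenced most often above are the truncated sha256 model hash and the A1111-style plain-text block stored under the PNG "parameters" key. Below is a minimal sketch of both, assuming Pillow is available; calculate_sha256 is named in the commits, while save_with_a1111_metadata and the cache variable are illustrative names, not the PR's actual code.

import hashlib

from PIL import Image
from PIL.PngImagePlugin import PngInfo

# Hypothetical module-level cache; the commits note that hashing a checkpoint
# takes roughly 5 seconds, so each model should be hashed at most once.
_hash_cache: dict[str, str] = {}


def calculate_sha256(filename: str, length: int = 10) -> str:
    """Return a truncated sha256 hex digest, cached per file path."""
    if filename not in _hash_cache:
        sha = hashlib.sha256()
        with open(filename, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                sha.update(chunk)
        _hash_cache[filename] = sha.hexdigest()
    return _hash_cache[filename][:length]


def save_with_a1111_metadata(image: Image.Image, path: str, parameters: str) -> None:
    """Write the plain-text block under the 'parameters' key, which is the key
    A1111's PNG Info tab and tools like Diffusion Toolkit read."""
    pnginfo = PngInfo()
    pnginfo.add_text('parameters', parameters)
    image.save(path, pnginfo=pnginfo)
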
124 lines · 3.0 KiB · Python
from enum import IntEnum, Enum

disabled = 'Disabled'
enabled = 'Enabled'
subtle_variation = 'Vary (Subtle)'
strong_variation = 'Vary (Strong)'
upscale_15 = 'Upscale (1.5x)'
upscale_2 = 'Upscale (2x)'
upscale_fast = 'Upscale (Fast 2x)'

uov_list = [
    disabled, subtle_variation, strong_variation, upscale_15, upscale_2, upscale_fast
]

CIVITAI_NO_KARRAS = ["euler", "euler_ancestral", "heun", "dpm_fast", "dpm_adaptive", "ddim", "uni_pc"]

# fooocus: a1111 (Civitai)
KSAMPLER = {
    "euler": "Euler",
    "euler_ancestral": "Euler a",
    "heun": "Heun",
    "heunpp2": "",
    "dpm_2": "DPM2",
    "dpm_2_ancestral": "DPM2 a",
    "lms": "LMS",
    "dpm_fast": "DPM fast",
    "dpm_adaptive": "DPM adaptive",
    "dpmpp_2s_ancestral": "DPM++ 2S a",
    "dpmpp_sde": "DPM++ SDE",
    "dpmpp_sde_gpu": "DPM++ SDE",
    "dpmpp_2m": "DPM++ 2M",
    "dpmpp_2m_sde": "DPM++ 2M SDE",
    "dpmpp_2m_sde_gpu": "DPM++ 2M SDE",
    "dpmpp_3m_sde": "",
    "dpmpp_3m_sde_gpu": "",
    "ddpm": "",
    "lcm": "LCM"
}

SAMPLER_EXTRA = {
    "ddim": "DDIM",
    "uni_pc": "UniPC",
    "uni_pc_bh2": ""
}

SAMPLERS = KSAMPLER | SAMPLER_EXTRA

KSAMPLER_NAMES = list(KSAMPLER.keys())

SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "lcm", "turbo"]
SAMPLER_NAMES = KSAMPLER_NAMES + list(SAMPLER_EXTRA.keys())

sampler_list = SAMPLER_NAMES
scheduler_list = SCHEDULER_NAMES
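
# Illustrative helper, not part of this module: how the tables above can be used
# when emitting A1111-style metadata. The *_gpu samplers deliberately map to the
# same name as their CPU variants, an empty string means there is no direct
# A1111 equivalent (fall back to the Fooocus name), and Civitai-style names
# append "Karras" unless the sampler is listed in CIVITAI_NO_KARRAS.
def to_a1111_sampler_name(sampler: str, scheduler: str) -> str:
    name = SAMPLERS.get(sampler) or sampler
    if scheduler == 'karras' and sampler not in CIVITAI_NO_KARRAS:
        name = f'{name} Karras'
    return name

# e.g. to_a1111_sampler_name('dpmpp_2m_sde_gpu', 'karras') == 'DPM++ 2M SDE Karras'
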
refiner_swap_method = 'joint'

cn_ip = "ImagePrompt"
cn_ip_face = "FaceSwap"
cn_canny = "PyraCanny"
cn_cpds = "CPDS"

ip_list = [cn_ip, cn_canny, cn_cpds, cn_ip_face]
default_ip = cn_ip

default_parameters = {
    cn_ip: (0.5, 0.6), cn_ip_face: (0.9, 0.75), cn_canny: (0.5, 1.0), cn_cpds: (0.5, 1.0)
}  # stop, weight
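
# Usage sketch (illustrative, not part of this module): each tuple above is the
# (stop_at, weight) default for one Image Prompt type.
default_stop_at, default_weight = default_parameters[cn_canny]  # (0.5, 1.0)
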
inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6']
inpaint_option_default = 'Inpaint or Outpaint (default)'
inpaint_option_detail = 'Improve Detail (face, hand, eyes, etc.)'
inpaint_option_modify = 'Modify Content (add objects, change background, etc.)'
inpaint_options = [inpaint_option_default, inpaint_option_detail, inpaint_option_modify]

desc_type_photo = 'Photograph'
desc_type_anime = 'Art/Anime'


class MetadataScheme(Enum):
    FOOOCUS = 'fooocus'
    A1111 = 'a1111'


metadata_scheme = [
    (f'{MetadataScheme.FOOOCUS.value} (json)', MetadataScheme.FOOOCUS.value),
    (f'{MetadataScheme.A1111.value} (plain text)', MetadataScheme.A1111.value),
]
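
# Illustrative sketch of scheme detection for imported metadata (an assumption,
# not necessarily the PR's exact logic): Fooocus embeds JSON, while A1111 uses
# the plain-text "parameters" block, so a failed JSON parse falls back to A1111.
import json


def guess_metadata_scheme(raw: str) -> MetadataScheme:
    try:
        json.loads(raw)
        return MetadataScheme.FOOOCUS
    except json.JSONDecodeError:
        return MetadataScheme.A1111
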
lora_count = 5

controlnet_image_count = 4
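
# Sketch with illustrative key names (not the PR's exact fields): the central
# counts above let the UI, the metadata writer and the parser agree on how many
# LoRA and ControlNet slots exist without hard-coding the numbers in each place.
lora_slots = [f'lora_{i + 1}' for i in range(lora_count)]  # 5 slots
image_prompt_slots = [f'image_prompt_{i + 1}' for i in range(controlnet_image_count)]  # 4 slots
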
class Steps(IntEnum):
    QUALITY = 60
    SPEED = 30
    EXTREME_SPEED = 8


class StepsUOV(IntEnum):
    QUALITY = 36
    SPEED = 18
    EXTREME_SPEED = 8


class Performance(Enum):
    QUALITY = 'Quality'
    SPEED = 'Speed'
    EXTREME_SPEED = 'Extreme Speed'

    @classmethod
    def list(cls) -> list:
        return list(map(lambda c: c.value, cls))

    def steps(self) -> int | None:
        return Steps[self.name].value if Steps[self.name] else None

    def steps_uov(self) -> int | None:
        return StepsUOV[self.name].value if StepsUOV[self.name] else None


performance_selections = Performance.list()
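
# Illustrative sketch of the steps-override rule from the commit log (the wiring
# is assumed, not copied from the PR): an imported steps value only overrides
# the performance preset when it is a custom number, or a preset that does not
# belong to the imported performance mode.
def should_override_steps(imported_steps: int, performance: Performance) -> bool:
    preset_values = {s.value for s in Steps}
    return imported_steps not in preset_values or imported_steps != performance.steps()

# should_override_steps(30, Performance.SPEED)   -> False (Speed already means 30 steps)
# should_override_steps(30, Performance.QUALITY) -> True  (preset value, wrong mode)
# should_override_steps(25, Performance.SPEED)   -> True  (custom value)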