Merge pull request #2558 from lllyasviel/develop

release 2.3.0
Manuel Schmid 2024-03-18 18:33:27 +01:00 committed by GitHub
commit a1bda88aa3
30 changed files with 640 additions and 303 deletions

View File

@ -4,7 +4,10 @@ import os
from tempfile import gettempdir
args_parser.parser.add_argument("--share", action='store_true', help="Set whether to share on Gradio.")
args_parser.parser.add_argument("--preset", type=str, default=None, help="Apply specified UI preset.")
args_parser.parser.add_argument("--disable-preset-selection", action='store_true',
help="Disables preset selection in Gradio.")
args_parser.parser.add_argument("--language", type=str, default='default',
help="Translate UI using json files in [language] folder. "
@ -49,7 +52,4 @@ if args_parser.args.disable_analytics:
if args_parser.args.disable_in_browser:
args_parser.args.in_browser = False
if args_parser.args.temp_path is None:
args_parser.args.temp_path = os.path.join(gettempdir(), 'Fooocus')
args = args_parser.args
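For reference, a minimal standalone sketch (plain argparse, not the real args_manager module) of how the new flag surfaces in the parsed arguments; note that argparse exposes it as disable_preset_selection:

import argparse

parser = argparse.ArgumentParser()
# new in 2.3.0: allow hiding the preset dropdown in the Gradio UI
parser.add_argument("--disable-preset-selection", action='store_true',
                    help="Disables preset selection in Gradio.")

args = parser.parse_args(["--disable-preset-selection"])
print(args.disable_preset_selection)  # True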

View File

@ -1,5 +1,136 @@
/* based on https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/v1.6.0/style.css */
.loader-container {
display: flex; /* Use flex to align items horizontally */
align-items: center; /* Center items vertically within the container */
white-space: nowrap; /* Prevent line breaks within the container */
}
.loader {
border: 8px solid #f3f3f3; /* Light grey */
border-top: 8px solid #3498db; /* Blue */
border-radius: 50%;
width: 30px;
height: 30px;
animation: spin 2s linear infinite;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
/* Style the progress bar */
progress {
appearance: none; /* Remove default styling */
height: 20px; /* Set the height of the progress bar */
border-radius: 5px; /* Round the corners of the progress bar */
background-color: #f3f3f3; /* Light grey background */
width: 100%;
}
/* Style the progress bar container */
.progress-container {
margin-left: 20px;
margin-right: 20px;
flex-grow: 1; /* Allow the progress container to take up remaining space */
}
/* Set the color of the progress bar fill */
progress::-webkit-progress-value {
background-color: #3498db; /* Blue color for the fill */
}
progress::-moz-progress-bar {
background-color: #3498db; /* Blue color for the fill in Firefox */
}
/* Style the text on the progress bar */
progress::after {
content: attr(value '%'); /* Display the progress value followed by '%' */
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
color: white; /* Set text color */
font-size: 14px; /* Set font size */
}
/* Style other texts */
.loader-container > span {
margin-left: 5px; /* Add spacing between the progress bar and the text */
}
.progress-bar > .generating {
display: none !important;
}
.progress-bar{
height: 30px !important;
}
.type_row{
height: 80px !important;
}
.type_row_half{
height: 32px !important;
}
.scroll-hide{
resize: none !important;
}
.refresh_button{
border: none !important;
background: none !important;
font-size: none !important;
box-shadow: none !important;
}
.advanced_check_row{
width: 250px !important;
}
.min_check{
min-width: min(1px, 100%) !important;
}
.resizable_area {
resize: vertical;
overflow: auto !important;
}
.aspect_ratios label {
width: 140px !important;
}
.aspect_ratios label span {
white-space: nowrap !important;
}
.aspect_ratios label input {
margin-left: -5px !important;
}
.lora_enable label {
height: 100%;
}
.lora_enable label input {
margin: auto;
}
.lora_enable label span {
display: none;
}
@-moz-document url-prefix() {
.lora_weight input[type=number] {
width: 80px;
}
}
#context-menu{
z-index:9999;
position:absolute;
@ -218,3 +349,48 @@
#stylePreviewOverlay.lower-half {
transform: translate(-140px, -140px);
}
/* scrollable box for style selections */
.contain .tabs {
height: 100%;
}
.contain .tabs .tabitem.style_selections_tab {
height: 100%;
}
.contain .tabs .tabitem.style_selections_tab > div:first-child {
height: 100%;
}
.contain .tabs .tabitem.style_selections_tab .style_selections {
min-height: 200px;
height: 100%;
}
.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] {
position: absolute; /* remove this to disable scrolling within the checkbox-group */
overflow: auto;
padding-right: 2px;
max-height: 100%;
}
.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] label {
/* max-width: calc(35% - 15px) !important; */ /* add this to enable 3 columns layout */
flex: calc(50% - 5px) !important;
}
.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] label span {
/* white-space:nowrap; */ /* add this to disable text wrapping (better choice for 3 columns layout) */
overflow: hidden;
text-overflow: ellipsis;
}
/* styles preview tooltip */
.preview-tooltip {
background-color: #fff8;
font-family: monospace;
text-align: center;
border-radius-top: 5px;
display: none; /* remove this to enable tooltip in preview image */
}

View File

@ -12,7 +12,7 @@
"%cd /content\n", "%cd /content\n",
"!git clone https://github.com/lllyasviel/Fooocus.git\n", "!git clone https://github.com/lllyasviel/Fooocus.git\n",
"%cd /content/Fooocus\n", "%cd /content/Fooocus\n",
"!python entry_with_update.py --share\n" "!python entry_with_update.py --share --always-high-vram\n"
] ]
} }
], ],

View File

@ -1 +1 @@
-version = '2.2.1'
+version = '2.3.0'

View File

@ -150,9 +150,12 @@ function initStylePreviewOverlay() {
let overlayVisible = false;
const samplesPath = document.querySelector("meta[name='samples-path']").getAttribute("content")
const overlay = document.createElement('div');
const tooltip = document.createElement('div');
tooltip.className = 'preview-tooltip';
overlay.appendChild(tooltip);
overlay.id = 'stylePreviewOverlay';
document.body.appendChild(overlay);
-document.addEventListener('mouseover', function(e) {
+document.addEventListener('mouseover', function (e) {
const label = e.target.closest('.style_selections label');
if (!label) return;
label.removeEventListener("mouseout", onMouseLeave);
@ -162,9 +165,12 @@ function initStylePreviewOverlay() {
const originalText = label.querySelector("span").getAttribute("data-original-text");
const name = originalText || label.querySelector("span").textContent;
overlay.style.backgroundImage = `url("${samplesPath.replace(
"fooocus_v2",
name.toLowerCase().replaceAll(" ", "_")
).replaceAll("\\", "\\\\")}")`;
tooltip.textContent = name;
function onMouseLeave() {
overlayVisible = false;
overlay.style.opacity = "0";
@ -172,8 +178,8 @@ function initStylePreviewOverlay() {
label.removeEventListener("mouseout", onMouseLeave);
}
});
-document.addEventListener('mousemove', function(e) {
+document.addEventListener('mousemove', function (e) {
-if(!overlayVisible) return;
+if (!overlayVisible) return;
overlay.style.left = `${e.clientX}px`;
overlay.style.top = `${e.clientY}px`;
overlay.className = e.clientY > window.innerHeight / 2 ? "lower-half" : "upper-half";

View File

@ -38,9 +38,12 @@
"* \"Inpaint or Outpaint\" is powered by the sampler \"DPMPP Fooocus Seamless 2M SDE Karras Inpaint Sampler\" (beta)": "* \"Inpaint or Outpaint\" is powered by the sampler \"DPMPP Fooocus Seamless 2M SDE Karras Inpaint Sampler\" (beta)", "* \"Inpaint or Outpaint\" is powered by the sampler \"DPMPP Fooocus Seamless 2M SDE Karras Inpaint Sampler\" (beta)": "* \"Inpaint or Outpaint\" is powered by the sampler \"DPMPP Fooocus Seamless 2M SDE Karras Inpaint Sampler\" (beta)",
"Setting": "Setting", "Setting": "Setting",
"Style": "Style", "Style": "Style",
"Preset": "Preset",
"Performance": "Performance", "Performance": "Performance",
"Speed": "Speed", "Speed": "Speed",
"Quality": "Quality", "Quality": "Quality",
"Extreme Speed": "Extreme Speed",
"Lightning": "Lightning",
"Aspect Ratios": "Aspect Ratios", "Aspect Ratios": "Aspect Ratios",
"width \u00d7 height": "width \u00d7 height", "width \u00d7 height": "width \u00d7 height",
"Image Number": "Image Number", "Image Number": "Image Number",
@ -50,6 +53,7 @@
"Seed": "Seed", "Seed": "Seed",
"Disable seed increment": "Disable seed increment", "Disable seed increment": "Disable seed increment",
"Disable automatic seed increment when image number is > 1.": "Disable automatic seed increment when image number is > 1.", "Disable automatic seed increment when image number is > 1.": "Disable automatic seed increment when image number is > 1.",
"Read wildcards in order": "Read wildcards in order",
"\ud83d\udcda History Log": "\uD83D\uDCDA History Log", "\ud83d\udcda History Log": "\uD83D\uDCDA History Log",
"Image Style": "Image Style", "Image Style": "Image Style",
"Fooocus V2": "Fooocus V2", "Fooocus V2": "Fooocus V2",
@ -367,7 +371,6 @@
"B2": "B2", "B2": "B2",
"S1": "S1", "S1": "S1",
"S2": "S2", "S2": "S2",
"Extreme Speed": "Extreme Speed",
"\uD83D\uDD0E Type here to search styles ...": "\uD83D\uDD0E Type here to search styles ...", "\uD83D\uDD0E Type here to search styles ...": "\uD83D\uDD0E Type here to search styles ...",
"Type prompt here.": "Type prompt here.", "Type prompt here.": "Type prompt here.",
"Outpaint Expansion Direction:": "Outpaint Expansion Direction:", "Outpaint Expansion Direction:": "Outpaint Expansion Direction:",
@ -381,5 +384,6 @@
"Metadata Scheme": "Metadata Scheme", "Metadata Scheme": "Metadata Scheme",
"Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.", "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.",
"fooocus (json)": "fooocus (json)", "fooocus (json)": "fooocus (json)",
"a1111 (plain text)": "a1111 (plain text)" "a1111 (plain text)": "a1111 (plain text)",
"Unsupported image type in input": "Unsupported image type in input"
} }

View File

@ -1,6 +1,6 @@
import os
-import sys
import ssl
+import sys
print('[System ARGV] ' + str(sys.argv))
@ -15,15 +15,13 @@ if "GRADIO_SERVER_PORT" not in os.environ:
ssl._create_default_https_context = ssl._create_unverified_context
import platform
import fooocus_version
from build_launcher import build_launcher
-from modules.launch_util import is_installed, run, python, run_pip, requirements_met
+from modules.launch_util import is_installed, run, python, run_pip, requirements_met, delete_folder_content
from modules.model_loader import load_file_from_url
REINSTALL_ALL = False
TRY_INSTALL_XFORMERS = False
@ -68,6 +66,7 @@ vae_approx_filenames = [
'https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors')
]
def ini_args():
from args_manager import args
return args
@ -77,15 +76,24 @@ prepare_environment()
build_launcher()
args = ini_args()
if args.gpu_device_id is not None:
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_device_id)
print("Set device to:", args.gpu_device_id)
from modules import config
-def download_models():
+os.environ['GRADIO_TEMP_DIR'] = config.temp_path
if config.temp_path_cleanup_on_launch:
print(f'[Cleanup] Attempting to delete content of temp dir {config.temp_path}')
result = delete_folder_content(config.temp_path, '[Cleanup] ')
if result:
print("[Cleanup] Cleanup successful")
else:
print(f"[Cleanup] Failed to delete content of temp dir.")
def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads):
for file_name, url in vae_approx_filenames:
load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name)
@ -97,31 +105,32 @@ def download_models():
if args.disable_preset_download:
print('Skipped model download.')
-return
+return default_model, checkpoint_downloads
if not args.always_download_new_model:
-if not os.path.exists(os.path.join(config.paths_checkpoints[0], config.default_base_model_name)):
+if not os.path.exists(os.path.join(config.paths_checkpoints[0], default_model)):
-for alternative_model_name in config.previous_default_models:
+for alternative_model_name in previous_default_models:
if os.path.exists(os.path.join(config.paths_checkpoints[0], alternative_model_name)):
-print(f'You do not have [{config.default_base_model_name}] but you have [{alternative_model_name}].')
+print(f'You do not have [{default_model}] but you have [{alternative_model_name}].')
print(f'Fooocus will use [{alternative_model_name}] to avoid downloading new models, '
-f'but you are not using latest models.')
+f'but you are not using the latest models.')
print('Use --always-download-new-model to avoid fallback and always get new models.')
-config.checkpoint_downloads = {}
+checkpoint_downloads = {}
-config.default_base_model_name = alternative_model_name
+default_model = alternative_model_name
break
-for file_name, url in config.checkpoint_downloads.items():
+for file_name, url in checkpoint_downloads.items():
load_file_from_url(url=url, model_dir=config.paths_checkpoints[0], file_name=file_name)
-for file_name, url in config.embeddings_downloads.items():
+for file_name, url in embeddings_downloads.items():
load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name)
-for file_name, url in config.lora_downloads.items():
+for file_name, url in lora_downloads.items():
load_file_from_url(url=url, model_dir=config.paths_loras[0], file_name=file_name)
-return
+return default_model, checkpoint_downloads
-download_models()
+config.default_base_model_name, config.checkpoint_downloads = download_models(
config.default_base_model_name, config.previous_default_models, config.checkpoint_downloads,
config.embeddings_downloads, config.lora_downloads)
from webui import *

View File

@ -1,4 +1,5 @@
import threading
import re
from modules.patch import PatchSettings, patch_settings, patch_all
patch_all()
@ -45,8 +46,8 @@ def worker():
from modules.sdxl_styles import apply_style, apply_wildcards, fooocus_expansion, apply_arrays
from modules.private_logger import log
from extras.expansion import safe_str
-from modules.util import remove_empty_str, HWC3, resize_image, \
+from modules.util import remove_empty_str, HWC3, resize_image, get_image_shape_ceil, set_image_shape_ceil, \
-get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate, ordinal_suffix
+get_shape_ceil, resample_image, erode_or_dilate, ordinal_suffix, get_enabled_loras
from modules.upscaler import perform_upscale
from modules.flags import Performance
from modules.meta_parser import get_metadata_parser, MetadataScheme
@ -123,14 +124,6 @@ def worker():
async_task.results = async_task.results + [wall]
return
def apply_enabled_loras(loras):
enabled_loras = []
for lora_enabled, lora_model, lora_weight in loras:
if lora_enabled:
enabled_loras.append([lora_model, lora_weight])
return enabled_loras
@torch.no_grad()
@torch.inference_mode()
def handler(async_task):
@ -148,12 +141,13 @@ def worker():
image_number = args.pop()
output_format = args.pop()
image_seed = args.pop()
read_wildcards_in_order = args.pop()
sharpness = args.pop()
guidance_scale = args.pop()
base_model_name = args.pop()
refiner_model_name = args.pop()
refiner_switch = args.pop()
-loras = apply_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop()), ] for _ in range(modules.config.default_max_lora_number)])
+loras = get_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop())] for _ in range(modules.config.default_max_lora_number)])
input_image_checkbox = args.pop()
current_tab = args.pop()
uov_method = args.pop()
@ -250,6 +244,25 @@ def worker():
adm_scaler_negative = 1.0
adm_scaler_end = 0.0
elif performance_selection == Performance.LIGHTNING:
print('Enter Lightning mode.')
progressbar(async_task, 1, 'Downloading Lightning components ...')
loras += [(modules.config.downloading_sdxl_lightning_lora(), 1.0)]
if refiner_model_name != 'None':
print(f'Refiner disabled in Lightning mode.')
refiner_model_name = 'None'
sampler_name = 'euler'
scheduler_name = 'sgm_uniform'
sharpness = 0.0
guidance_scale = 1.0
adaptive_cfg = 1.0
refiner_switch = 1.0
adm_scaler_positive = 1.0
adm_scaler_negative = 1.0
adm_scaler_end = 0.0
print(f'[Parameters] Adaptive CFG = {adaptive_cfg}')
print(f'[Parameters] Sharpness = {sharpness}')
print(f'[Parameters] ControlNet Softness = {controlnet_softness}')
@ -347,7 +360,7 @@ def worker():
print(f'[Inpaint] Current inpaint model is {inpaint_patch_model_path}')
if refiner_model_name == 'None':
use_synthetic_refiner = True
-refiner_switch = 0.5
+refiner_switch = 0.8
else:
inpaint_head_model_path, inpaint_patch_model_path = None, None
print(f'[Inpaint] Parameterized inpaint is disabled.')
@ -422,16 +435,16 @@ def worker():
for i in range(image_number):
if disable_seed_increment:
-task_seed = seed
+task_seed = seed % (constants.MAX_SEED + 1)
else:
task_seed = (seed + i) % (constants.MAX_SEED + 1) # randint is inclusive, % is not
task_rng = random.Random(task_seed) # may bind to inpaint noise in the future
-task_prompt = apply_wildcards(prompt, task_rng)
+task_prompt = apply_wildcards(prompt, task_rng, i, read_wildcards_in_order)
task_prompt = apply_arrays(task_prompt, i)
-task_negative_prompt = apply_wildcards(negative_prompt, task_rng)
+task_negative_prompt = apply_wildcards(negative_prompt, task_rng, i, read_wildcards_in_order)
-task_extra_positive_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_positive_prompts]
+task_extra_positive_prompts = [apply_wildcards(pmt, task_rng, i, read_wildcards_in_order) for pmt in extra_positive_prompts]
-task_extra_negative_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_negative_prompts]
+task_extra_negative_prompts = [apply_wildcards(pmt, task_rng, i, read_wildcards_in_order) for pmt in extra_negative_prompts]
positive_basic_workloads = []
negative_basic_workloads = []
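As an aside, a small self-contained sketch of the new seeding rule above: with "Disable seed increment" the same wrapped seed is reused for every image, otherwise it advances per image; the MAX_SEED value here is an assumption standing in for modules.constants.MAX_SEED.

import random

MAX_SEED = 2 ** 63 - 1  # assumption: stand-in for modules.constants.MAX_SEED
seed = 12345
image_number = 3
disable_seed_increment = False

for i in range(image_number):
    if disable_seed_increment:
        task_seed = seed % (MAX_SEED + 1)        # fixed seed, still kept inside the valid range
    else:
        task_seed = (seed + i) % (MAX_SEED + 1)  # randint is inclusive, % is not
    task_rng = random.Random(task_seed)
    print(i, task_seed, task_rng.random())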

View File

@ -3,12 +3,14 @@ import json
import math
import numbers
import args_manager
import tempfile
import modules.flags
import modules.sdxl_styles
from modules.model_loader import load_file_from_url
from modules.util import get_files_from_folder, makedirs_with_log
-from modules.flags import Performance, MetadataScheme
+from modules.flags import OutputFormat, Performance, MetadataScheme
def get_config_path(key, default_value):
env = os.getenv(key)
@ -18,6 +20,7 @@ def get_config_path(key, default_value):
else:
return os.path.abspath(default_value)
config_path = get_config_path('config_path', "./config.txt")
config_example_path = get_config_path('config_example_path', "config_modification_tutorial.txt")
config_dict = {}
@ -94,21 +97,44 @@ def try_load_deprecated_user_path_config():
try_load_deprecated_user_path_config()
def get_presets():
preset_folder = 'presets'
presets = ['initial']
if not os.path.exists(preset_folder):
print('No presets found.')
return presets
return presets + [f[:f.index('.json')] for f in os.listdir(preset_folder) if f.endswith('.json')]
def try_get_preset_content(preset):
if isinstance(preset, str):
preset_path = os.path.abspath(f'./presets/{preset}.json')
try:
if os.path.exists(preset_path):
with open(preset_path, "r", encoding="utf-8") as json_file:
json_content = json.load(json_file)
print(f'Loaded preset: {preset_path}')
return json_content
else:
raise FileNotFoundError
except Exception as e:
print(f'Load preset [{preset_path}] failed')
print(e)
return {}
try:
with open(os.path.abspath(f'./presets/default.json'), "r", encoding="utf-8") as json_file:
config_dict.update(json.load(json_file))
except Exception as e:
print(f'Load default preset failed.')
print(e)
available_presets = get_presets()
preset = args_manager.args.preset
config_dict.update(try_get_preset_content(preset))
if isinstance(preset, str):
preset_path = os.path.abspath(f'./presets/{preset}.json')
try:
if os.path.exists(preset_path):
with open(preset_path, "r", encoding="utf-8") as json_file:
config_dict.update(json.load(json_file))
print(f'Loaded preset: {preset_path}')
else:
raise FileNotFoundError
except Exception as e:
print(f'Load preset [{preset_path}] failed')
print(e)
def get_path_output() -> str:
"""
@ -117,7 +143,7 @@ def get_path_output() -> str:
global config_dict
path_output = get_dir_or_set_default('path_outputs', '../outputs/', make_directory=True)
if args_manager.args.output_path:
-print(f'[CONFIG] Overriding config value path_outputs with {args_manager.args.output_path}')
+print(f'Overriding config value path_outputs with {args_manager.args.output_path}')
config_dict['path_outputs'] = path_output = args_manager.args.output_path
return path_output
@ -176,8 +202,10 @@ path_inpaint = get_dir_or_set_default('path_inpaint', '../models/inpaint/')
path_controlnet = get_dir_or_set_default('path_controlnet', '../models/controlnet/')
path_clip_vision = get_dir_or_set_default('path_clip_vision', '../models/clip_vision/')
path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion')
path_wildcards = get_dir_or_set_default('path_wildcards', '../wildcards/')
path_outputs = get_path_output()
def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False):
global config_dict, visited_keys
@ -206,7 +234,37 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_
return default_value
-default_base_model_name = get_config_item_or_set_default(
+def init_temp_path(path: str | None, default_path: str) -> str:
if args_manager.args.temp_path:
path = args_manager.args.temp_path
if path != '' and path != default_path:
try:
if not os.path.isabs(path):
path = os.path.abspath(path)
os.makedirs(path, exist_ok=True)
print(f'Using temp path {path}')
return path
except Exception as e:
print(f'Could not create temp path {path}. Reason: {e}')
print(f'Using default temp path {default_path} instead.')
os.makedirs(default_path, exist_ok=True)
return default_path
default_temp_path = os.path.join(tempfile.gettempdir(), 'fooocus')
temp_path = init_temp_path(get_config_item_or_set_default(
key='temp_path',
default_value=default_temp_path,
validator=lambda x: isinstance(x, str),
), default_temp_path)
temp_path_cleanup_on_launch = get_config_item_or_set_default(
key='temp_path_cleanup_on_launch',
default_value=True,
validator=lambda x: isinstance(x, bool)
)
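In plain terms, init_temp_path gives the --temp-path argument priority over the temp_path config key and falls back to a "fooocus" folder in the system temp dir; a simplified sketch of that precedence (not the actual function):

import os
import tempfile

def resolve_temp_path(cli_temp_path, config_temp_path):
    # precedence: --temp-path CLI argument > temp_path from config.txt > system default
    default_path = os.path.join(tempfile.gettempdir(), 'fooocus')
    path = cli_temp_path or config_temp_path or default_path
    try:
        os.makedirs(path, exist_ok=True)
        return os.path.abspath(path)
    except OSError:
        os.makedirs(default_path, exist_ok=True)
        return default_path

print(resolve_temp_path(None, None))  # e.g. /tmp/fooocus on Linux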
default_base_model_name = default_model = get_config_item_or_set_default(
key='default_model',
default_value='model.safetensors',
validator=lambda x: isinstance(x, str)
@ -216,7 +274,7 @@ previous_default_models = get_config_item_or_set_default(
default_value=[],
validator=lambda x: isinstance(x, list) and all(isinstance(k, str) for k in x)
)
-default_refiner_model_name = get_config_item_or_set_default(
+default_refiner_model_name = default_refiner = get_config_item_or_set_default(
key='default_refiner',
default_value='None',
validator=lambda x: isinstance(x, str)
@ -240,28 +298,37 @@ default_loras = get_config_item_or_set_default(
key='default_loras',
default_value=[
[
True,
"None",
1.0
],
[
True,
"None",
1.0
],
[
True,
"None",
1.0
],
[
True,
"None",
1.0
],
[
True,
"None",
1.0
]
],
-validator=lambda x: isinstance(x, list) and all(len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number) for y in x)
+validator=lambda x: isinstance(x, list) and all(
+len(y) == 3 and isinstance(y[0], bool) and isinstance(y[1], str) and isinstance(y[2], numbers.Number)
+or len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number)
+for y in x)
)
default_loras = [(y[0], y[1], y[2]) if len(y) == 3 else (True, y[0], y[1]) for y in default_loras]
default_max_lora_number = get_config_item_or_set_default(
key='default_max_lora_number',
default_value=len(default_loras) if isinstance(default_loras, list) and len(default_loras) > 0 else 5,
@ -326,7 +393,7 @@ default_max_image_number = get_config_item_or_set_default(
default_output_format = get_config_item_or_set_default(
key='default_output_format',
default_value='png',
-validator=lambda x: x in modules.flags.output_formats
+validator=lambda x: x in OutputFormat.list()
)
default_image_number = get_config_item_or_set_default(
key='default_image_number',
@ -411,29 +478,30 @@ example_inpaint_prompts = [[x] for x in example_inpaint_prompts]
config_dict["default_loras"] = default_loras = default_loras[:default_max_lora_number] + [['None', 1.0] for _ in range(default_max_lora_number - len(default_loras))]
-possible_preset_keys = [
-"default_model",
-"default_refiner",
-"default_refiner_switch",
-"default_loras_min_weight",
-"default_loras_max_weight",
-"default_loras",
-"default_max_lora_number",
-"default_cfg_scale",
-"default_sample_sharpness",
-"default_sampler",
-"default_scheduler",
-"default_performance",
-"default_prompt",
-"default_prompt_negative",
-"default_styles",
-"default_aspect_ratio",
-"default_save_metadata_to_images",
-"checkpoint_downloads",
-"embeddings_downloads",
-"lora_downloads",
-]
+# mapping config to meta parameter
+possible_preset_keys = {
+"default_model": "base_model",
+"default_refiner": "refiner_model",
+"default_refiner_switch": "refiner_switch",
+"previous_default_models": "previous_default_models",
+"default_loras_min_weight": "default_loras_min_weight",
+"default_loras_max_weight": "default_loras_max_weight",
+"default_loras": "<processed>",
+"default_cfg_scale": "guidance_scale",
+"default_sample_sharpness": "sharpness",
+"default_sampler": "sampler",
+"default_scheduler": "scheduler",
+"default_overwrite_step": "steps",
+"default_performance": "performance",
+"default_prompt": "prompt",
+"default_prompt_negative": "negative_prompt",
+"default_styles": "styles",
+"default_aspect_ratio": "resolution",
+"default_save_metadata_to_images": "default_save_metadata_to_images",
+"checkpoint_downloads": "checkpoint_downloads",
+"embeddings_downloads": "embeddings_downloads",
+"lora_downloads": "lora_downloads"
+}
REWRITE_PRESET = False
@ -474,21 +542,27 @@ with open(config_example_path, "w", encoding="utf-8") as json_file:
model_filenames = []
lora_filenames = []
wildcard_filenames = []
sdxl_lcm_lora = 'sdxl_lcm_lora.safetensors'
sdxl_lightning_lora = 'sdxl_lightning_4step_lora.safetensors'
-def get_model_filenames(folder_paths, name_filter=None):
+def get_model_filenames(folder_paths, extensions=None, name_filter=None):
-extensions = ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch']
+if extensions is None:
+extensions = ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch']
files = []
for folder in folder_paths:
files += get_files_from_folder(folder, extensions, name_filter)
return files
-def update_all_model_names():
+def update_files():
-global model_filenames, lora_filenames
+global model_filenames, lora_filenames, wildcard_filenames, available_presets
model_filenames = get_model_filenames(paths_checkpoints)
lora_filenames = get_model_filenames(paths_loras)
wildcard_filenames = get_files_from_folder(path_wildcards, ['.txt'])
available_presets = get_presets()
return
@ -538,6 +612,14 @@ def downloading_sdxl_lcm_lora():
)
return sdxl_lcm_lora
def downloading_sdxl_lightning_lora():
load_file_from_url(
url='https://huggingface.co/ByteDance/SDXL-Lightning/resolve/main/sdxl_lightning_4step_lora.safetensors',
model_dir=paths_loras[0],
file_name=sdxl_lightning_lora
)
return sdxl_lightning_lora
def downloading_controlnet_canny():
load_file_from_url(
@ -604,4 +686,4 @@ def downloading_upscale_model():
return os.path.join(path_upscale_models, 'fooocus_upscaler_s409985e5.bin')
-update_all_model_names()
+update_files()
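For orientation, a rough sketch of what the new preset handling amounts to (hypothetical helper names; the real functions are get_presets and try_get_preset_content above): presets/default.json is merged into config_dict first, then the preset chosen via --preset or the UI is merged on top.

import json
import os

def list_presets(preset_folder='presets'):
    # 'initial' plus the name of every *.json file in the presets folder
    presets = ['initial']
    if not os.path.exists(preset_folder):
        return presets
    return presets + [f[:-len('.json')] for f in os.listdir(preset_folder) if f.endswith('.json')]

def load_preset(name, preset_folder='presets'):
    # return the parsed preset, or {} if the file is missing or broken
    path = os.path.join(preset_folder, f'{name}.json')
    try:
        with open(path, encoding='utf-8') as fp:
            return json.load(fp)
    except (OSError, json.JSONDecodeError):
        return {}

config_dict = {}
config_dict.update(load_preset('default'))    # always applied first
config_dict.update(load_preset('lightning'))  # then the selected preset overrides it
print(sorted(config_dict))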

View File

@ -73,14 +73,14 @@ class StableDiffusionModel:
loras_to_load = []
-for name, weight in loras:
+for filename, weight in loras:
-if name == 'None':
+if filename == 'None':
continue
-if os.path.exists(name):
+if os.path.exists(filename):
-lora_filename = name
+lora_filename = filename
else:
-lora_filename = get_file_from_folder_list(name, modules.config.paths_loras)
+lora_filename = get_file_from_folder_list(filename, modules.config.paths_loras)
if not os.path.exists(lora_filename):
print(f'Lora file not found: {lora_filename}')

View File

@ -11,7 +11,7 @@ from extras.expansion import FooocusExpansion
from ldm_patched.modules.model_base import SDXL, SDXLRefiner
from modules.sample_hijack import clip_separate
-from modules.util import get_file_from_folder_list
+from modules.util import get_file_from_folder_list, get_enabled_loras
model_base = core.StableDiffusionModel()
@ -254,7 +254,7 @@ def refresh_everything(refiner_model_name, base_model_name, loras,
refresh_everything(
refiner_model_name=modules.config.default_refiner_model_name,
base_model_name=modules.config.default_base_model_name,
-loras=modules.config.default_loras
+loras=get_enabled_loras(modules.config.default_loras)
)

View File

@ -67,7 +67,7 @@ default_parameters = {
cn_ip: (0.5, 0.6), cn_ip_face: (0.9, 0.75), cn_canny: (0.5, 1.0), cn_cpds: (0.5, 1.0)
} # stop, weight
-output_formats = ['png', 'jpg', 'webp']
+output_formats = ['png', 'jpeg', 'webp']
inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6']
inpaint_option_default = 'Inpaint or Outpaint (default)'
@ -89,37 +89,51 @@ metadata_scheme = [
(f'{MetadataScheme.A1111.value} (plain text)', MetadataScheme.A1111.value),
]
lora_count = 5
controlnet_image_count = 4
class OutputFormat(Enum):
PNG = 'png'
JPEG = 'jpeg'
WEBP = 'webp'
@classmethod
def list(cls) -> list:
return list(map(lambda c: c.value, cls))
class Steps(IntEnum):
QUALITY = 60
SPEED = 30
EXTREME_SPEED = 8
LIGHTNING = 4
class StepsUOV(IntEnum):
QUALITY = 36
SPEED = 18
EXTREME_SPEED = 8
LIGHTNING = 4
class Performance(Enum):
QUALITY = 'Quality'
SPEED = 'Speed'
EXTREME_SPEED = 'Extreme Speed'
LIGHTNING = 'Lightning'
@classmethod
def list(cls) -> list:
return list(map(lambda c: c.value, cls))
@classmethod
def has_restricted_features(cls, x) -> bool:
if isinstance(x, Performance):
x = x.value
return x in [cls.EXTREME_SPEED.value, cls.LIGHTNING.value]
def steps(self) -> int | None:
return Steps[self.name].value if Steps[self.name] else None
def steps_uov(self) -> int | None:
return StepsUOV[self.name].value if Steps[self.name] else None
performance_selections = Performance.list()
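To illustrate the new enums, a reduced copy for demonstration only (the real definitions live in modules/flags.py as shown above):

from enum import Enum, IntEnum

class Steps(IntEnum):
    QUALITY = 60
    SPEED = 30
    EXTREME_SPEED = 8
    LIGHTNING = 4

class Performance(Enum):
    QUALITY = 'Quality'
    SPEED = 'Speed'
    EXTREME_SPEED = 'Extreme Speed'
    LIGHTNING = 'Lightning'

    def steps(self):
        return Steps[self.name].value

print([p.value for p in Performance])  # ['Quality', 'Speed', 'Extreme Speed', 'Lightning']
print(Performance.LIGHTNING.steps())   # 4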

View File

@ -17,7 +17,7 @@ from gradio_client.documentation import document, set_documentation_group
from gradio_client.serializing import ImgSerializable
from PIL import Image as _Image # using _ to minimize namespace pollution
-from gradio import processing_utils, utils
+from gradio import processing_utils, utils, Error
from gradio.components.base import IOComponent, _Keywords, Block
from gradio.deprecation import warn_style_method_deprecation
from gradio.events import (
@ -275,7 +275,10 @@ class Image(
x, mask = x["image"], x["mask"]
assert isinstance(x, str)
-im = processing_utils.decode_base64_to_image(x)
+try:
+im = processing_utils.decode_base64_to_image(x)
+except PIL.UnidentifiedImageError:
+raise Error("Unsupported image type in input")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
im = im.convert(self.image_mode)

View File

@ -1,136 +1,3 @@
css = '''
.loader-container {
display: flex; /* Use flex to align items horizontally */
align-items: center; /* Center items vertically within the container */
white-space: nowrap; /* Prevent line breaks within the container */
}
.loader {
border: 8px solid #f3f3f3; /* Light grey */
border-top: 8px solid #3498db; /* Blue */
border-radius: 50%;
width: 30px;
height: 30px;
animation: spin 2s linear infinite;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
/* Style the progress bar */
progress {
appearance: none; /* Remove default styling */
height: 20px; /* Set the height of the progress bar */
border-radius: 5px; /* Round the corners of the progress bar */
background-color: #f3f3f3; /* Light grey background */
width: 100%;
}
/* Style the progress bar container */
.progress-container {
margin-left: 20px;
margin-right: 20px;
flex-grow: 1; /* Allow the progress container to take up remaining space */
}
/* Set the color of the progress bar fill */
progress::-webkit-progress-value {
background-color: #3498db; /* Blue color for the fill */
}
progress::-moz-progress-bar {
background-color: #3498db; /* Blue color for the fill in Firefox */
}
/* Style the text on the progress bar */
progress::after {
content: attr(value '%'); /* Display the progress value followed by '%' */
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
color: white; /* Set text color */
font-size: 14px; /* Set font size */
}
/* Style other texts */
.loader-container > span {
margin-left: 5px; /* Add spacing between the progress bar and the text */
}
.progress-bar > .generating {
display: none !important;
}
.progress-bar{
height: 30px !important;
}
.type_row{
height: 80px !important;
}
.type_row_half{
height: 32px !important;
}
.scroll-hide{
resize: none !important;
}
.refresh_button{
border: none !important;
background: none !important;
font-size: none !important;
box-shadow: none !important;
}
.advanced_check_row{
width: 250px !important;
}
.min_check{
min-width: min(1px, 100%) !important;
}
.resizable_area {
resize: vertical;
overflow: auto !important;
}
.aspect_ratios label {
width: 140px !important;
}
.aspect_ratios label span {
white-space: nowrap !important;
}
.aspect_ratios label input {
margin-left: -5px !important;
}
.lora_enable label {
height: 100%;
}
.lora_enable label input {
margin: auto;
}
.lora_enable label span {
display: none;
}
@-moz-document url-prefix() {
.lora_weight input[type=number] {
width: 80px;
}
}
'''
progress_html = '''
<div class="loader-container">
<div class="loader"></div>

View File

@ -1,6 +1,7 @@
import os
import importlib
import importlib.util
import shutil
import subprocess
import sys
import re
@ -9,9 +10,6 @@ import importlib.metadata
import packaging.version
from packaging.requirements import Requirement
logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh...
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
@ -101,3 +99,19 @@ def requirements_met(requirements_file):
return True
def delete_folder_content(folder, prefix=None):
result = True
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print(f'{prefix}Failed to delete {file_path}. Reason: {e}')
result = False
return result
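A quick usage sketch for the new helper (assumes it is run from a Fooocus checkout so modules.launch_util is importable; the scratch folder path is made up):

import os
import tempfile

from modules.launch_util import delete_folder_content

scratch = os.path.join(tempfile.gettempdir(), 'fooocus-scratch')  # hypothetical folder
os.makedirs(scratch, exist_ok=True)
open(os.path.join(scratch, 'leftover.png'), 'w').close()

ok = delete_folder_content(scratch, '[Cleanup] ')
print(ok, os.listdir(scratch))  # True [] -- the folder itself is kept, only its content is removed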

View File

@ -210,9 +210,8 @@ def parse_meta_from_preset(preset_content):
height = height[:height.index(" ")]
preset_prepared[meta_key] = (width, height)
else:
-preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[
-settings_key] is not None else getattr(modules.config, settings_key)
+preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[settings_key] is not None else getattr(modules.config, settings_key)
if settings_key == "default_styles" or settings_key == "default_aspect_ratio":
preset_prepared[meta_key] = str(preset_prepared[meta_key])
@ -569,4 +568,4 @@ def get_exif(metadata: str | None, metadata_scheme: str):
exif[0x0131] = 'Fooocus v' + fooocus_version.version
# 0x927C = MakerNote
exif[0x927C] = metadata_scheme
return exif

View File

@ -6,8 +6,9 @@ import urllib.parse
from PIL import Image
from PIL.PngImagePlugin import PngInfo
-from modules.util import generate_temp_filename
+from modules.flags import OutputFormat
from modules.meta_parser import MetadataParser, get_exif
from modules.util import generate_temp_filename
log_cache = {}
@ -21,7 +22,7 @@ def get_current_html_path(output_format=None):
def log(img, metadata, metadata_parser: MetadataParser | None = None, output_format=None) -> str:
-path_outputs = args_manager.args.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs
+path_outputs = modules.config.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs
output_format = output_format if output_format else modules.config.default_output_format
date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension=output_format)
os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True)
@ -29,7 +30,7 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for
parsed_parameters = metadata_parser.parse_string(metadata.copy()) if metadata_parser is not None else ''
image = Image.fromarray(img)
-if output_format == 'png':
+if output_format == OutputFormat.PNG.value:
if parsed_parameters != '':
pnginfo = PngInfo()
pnginfo.add_text('parameters', parsed_parameters)
@ -37,9 +38,9 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for
else:
pnginfo = None
image.save(local_temp_filename, pnginfo=pnginfo)
-elif output_format == 'jpg':
+elif output_format == OutputFormat.JPEG.value:
image.save(local_temp_filename, quality=95, optimize=True, progressive=True, exif=get_exif(parsed_parameters, metadata_parser.get_scheme().value) if metadata_parser else Image.Exif())
-elif output_format == 'webp':
+elif output_format == OutputFormat.WEBP.value:
image.save(local_temp_filename, quality=95, lossless=False, exif=get_exif(parsed_parameters, metadata_parser.get_scheme().value) if metadata_parser else Image.Exif())
else:
image.save(local_temp_filename)

View File

@ -2,13 +2,12 @@ import os
import re
import json
import math
import modules.config
from modules.util import get_files_from_folder
# cannot use modules.config - validators causing circular imports
styles_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../sdxl_styles/'))
wildcards_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../wildcards/'))
wildcards_max_bfs_depth = 64
@ -60,7 +59,7 @@ def apply_style(style, positive):
return p.replace('{prompt}', positive).splitlines(), n.splitlines()
-def apply_wildcards(wildcard_text, rng, directory=wildcards_path):
+def apply_wildcards(wildcard_text, rng, i, read_wildcards_in_order):
for _ in range(wildcards_max_bfs_depth):
placeholders = re.findall(r'__([\w-]+)__', wildcard_text)
if len(placeholders) == 0:
@ -69,10 +68,14 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path):
print(f'[Wildcards] processing: {wildcard_text}')
for placeholder in placeholders:
try:
-words = open(os.path.join(directory, f'{placeholder}.txt'), encoding='utf-8').read().splitlines()
+matches = [x for x in modules.config.wildcard_filenames if os.path.splitext(os.path.basename(x))[0] == placeholder]
+words = open(os.path.join(modules.config.path_wildcards, matches[0]), encoding='utf-8').read().splitlines()
words = [x for x in words if x != '']
assert len(words) > 0
-wildcard_text = wildcard_text.replace(f'__{placeholder}__', rng.choice(words), 1)
+if read_wildcards_in_order:
+wildcard_text = wildcard_text.replace(f'__{placeholder}__', words[i % len(words)], 1)
+else:
+wildcard_text = wildcard_text.replace(f'__{placeholder}__', rng.choice(words), 1)
except:
print(f'[Wildcards] Warning: {placeholder}.txt missing or empty. '
f'Using "{placeholder}" as a normal word.')
@ -82,8 +85,9 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path):
print(f'[Wildcards] BFS stack overflow. Current text: {wildcard_text}')
return wildcard_text
def get_words(arrays, totalMult, index):
-if(len(arrays) == 1):
+if len(arrays) == 1:
return [arrays[0].split(',')[index]]
else:
words = arrays[0].split(',')
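The behavioural difference introduced by "Read wildcards in order" can be shown with a reduced version of the selection rule (standalone sketch; the real logic lives in apply_wildcards above):

import random

def pick(words, i, rng, read_wildcards_in_order):
    if read_wildcards_in_order:
        return words[i % len(words)]   # image i deterministically gets entry i
    return rng.choice(words)           # previous behaviour: seeded random pick

colors = ['red', 'green', 'blue']      # e.g. the lines of a hypothetical wildcards/color.txt
rng = random.Random(42)
print([pick(colors, i, rng, True) for i in range(4)])   # ['red', 'green', 'blue', 'red']
print([pick(colors, i, rng, False) for i in range(4)])  # seeded random choices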

View File

@ -163,7 +163,7 @@ def generate_temp_filename(folder='./outputs/', extension='png'):
return date_string, os.path.abspath(result), filename
-def get_files_from_folder(folder_path, exensions=None, name_filter=None):
+def get_files_from_folder(folder_path, extensions=None, name_filter=None):
if not os.path.isdir(folder_path):
raise ValueError("Folder path is not a valid directory.")
@ -175,7 +175,7 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None):
relative_path = ""
for filename in sorted(files, key=lambda s: s.casefold()):
_, file_extension = os.path.splitext(filename)
-if (exensions is None or file_extension.lower() in exensions) and (name_filter is None or name_filter in _):
+if (extensions is None or file_extension.lower() in extensions) and (name_filter is None or name_filter in _):
path = os.path.join(relative_path, filename)
filenames.append(path)
@ -360,3 +360,7 @@ def makedirs_with_log(path):
os.makedirs(path, exist_ok=True)
except OSError as error:
print(f'Directory {path} could not be created, reason: {error}')
def get_enabled_loras(loras: list) -> list:
return [[lora[1], lora[2]] for lora in loras if lora[0]]
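Example input and output for the new helper, using the [enabled, filename, weight] triples the UI now produces (the filenames below are only placeholders):

def get_enabled_loras(loras: list) -> list:
    # same logic as the helper above: keep enabled entries, drop the boolean flag
    return [[lora[1], lora[2]] for lora in loras if lora[0]]

ui_loras = [
    [True, 'sd_xl_offset_example-lora_1.0.safetensors', 0.1],
    [False, 'disabled_lora.safetensors', 0.8],   # filtered out
    [True, 'None', 1.0],
]
print(get_enabled_loras(ui_loras))
# [['sd_xl_offset_example-lora_1.0.safetensors', 0.1], ['None', 1.0]]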

View File

@ -4,22 +4,27 @@
"default_refiner_switch": 0.5, "default_refiner_switch": 0.5,
"default_loras": [ "default_loras": [
[ [
true,
"None", "None",
1.0 1.0
], ],
[ [
true,
"None", "None",
1.0 1.0
], ],
[ [
true,
"None", "None",
1.0 1.0
], ],
[ [
true,
"None", "None",
1.0 1.0
], ],
[ [
true,
"None", "None",
1.0 1.0
] ]
@ -33,7 +38,7 @@
"default_prompt_negative": "", "default_prompt_negative": "",
"default_styles": [ "default_styles": [
"Fooocus V2", "Fooocus V2",
"Fooocus Negative", "Fooocus Semi Realistic",
"Fooocus Masterpiece" "Fooocus Masterpiece"
], ],
"default_aspect_ratio": "896*1152", "default_aspect_ratio": "896*1152",

View File

@ -4,22 +4,27 @@
"default_refiner_switch": 0.5, "default_refiner_switch": 0.5,
"default_loras": [ "default_loras": [
[ [
true,
"sd_xl_offset_example-lora_1.0.safetensors", "sd_xl_offset_example-lora_1.0.safetensors",
0.1 0.1
], ],
[ [
true,
"None", "None",
1.0 1.0
], ],
[ [
true,
"None", "None",
1.0 1.0
], ],
[ [
true,
"None", "None",
1.0 1.0
], ],
[ [
true,
"None", "None",
1.0 1.0
] ]

View File

@ -4,22 +4,27 @@
"default_refiner_switch": 0.5, "default_refiner_switch": 0.5,
"default_loras": [ "default_loras": [
[ [
true,
"None", "None",
1.0 1.0
], ],
[ [
true,
"None", "None",
1.0 1.0
], ],
[ [
true,
"None", "None",
1.0 1.0
], ],
[ [
true,
"None", "None",
1.0 1.0
], ],
[ [
true,
"None", "None",
1.0 1.0
] ]

presets/lightning.json (new file, 57 lines)
View File

@ -0,0 +1,57 @@
{
"default_model": "juggernautXL_v8Rundiffusion.safetensors",
"default_refiner": "None",
"default_refiner_switch": 0.5,
"default_loras": [
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
]
],
"default_cfg_scale": 4.0,
"default_sample_sharpness": 2.0,
"default_sampler": "dpmpp_2m_sde_gpu",
"default_scheduler": "karras",
"default_performance": "Lightning",
"default_prompt": "",
"default_prompt_negative": "",
"default_styles": [
"Fooocus V2",
"Fooocus Enhance",
"Fooocus Sharp"
],
"default_aspect_ratio": "1152*896",
"checkpoint_downloads": {
"juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
},
"embeddings_downloads": {},
"lora_downloads": {},
"previous_default_models": [
"juggernautXL_version8Rundiffusion.safetensors",
"juggernautXL_version7Rundiffusion.safetensors",
"juggernautXL_v7Rundiffusion.safetensors",
"juggernautXL_version6Rundiffusion.safetensors",
"juggernautXL_v6Rundiffusion.safetensors"
]
}
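For context, a small sketch of how a preset file like the one above relates to the new possible_preset_keys mapping in modules/config.py (simplified; only a subset of keys is shown, it assumes the script runs from the repository root, and actual preset switching also restores models and LoRAs):

import json

# subset of the config-key -> UI/metadata-parameter mapping introduced in modules/config.py
preset_key_map = {
    "default_model": "base_model",
    "default_cfg_scale": "guidance_scale",
    "default_sampler": "sampler",
    "default_scheduler": "scheduler",
    "default_performance": "performance",
}

with open('presets/lightning.json', encoding='utf-8') as fp:  # the new preset shown above
    preset = json.load(fp)

print({ui: preset[key] for key, ui in preset_key_map.items() if key in preset})
# {'base_model': 'juggernautXL_v8Rundiffusion.safetensors', 'guidance_scale': 4.0, ...}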

View File

@ -1,25 +1,30 @@
{
"default_model": "realisticStockPhoto_v20.safetensors",
-"default_refiner": "",
+"default_refiner": "None",
"default_refiner_switch": 0.5,
"default_loras": [
[
true,
"SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors",
0.25
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
]

View File

@ -4,22 +4,27 @@
"default_refiner_switch": 0.75, "default_refiner_switch": 0.75,
"default_loras": [ "default_loras": [
[ [
true,
"sd_xl_offset_example-lora_1.0.safetensors", "sd_xl_offset_example-lora_1.0.safetensors",
0.5 0.5
], ],
[ [
true,
"None", "None",
1.0 1.0
], ],
[ [
true,
"None", "None",
1.0 1.0
], ],
[ [
true,
"None", "None",
1.0 1.0
], ],
[ [
true,
"None", "None",
1.0 1.0
] ]

View File

@ -84,6 +84,10 @@ The first time you launch the software, it will automatically download models:
After Fooocus 2.1.60, you will also have `run_anime.bat` and `run_realistic.bat`. They are different model presets (and require different models, but they will be automatically downloaded). [Check here for more details](https://github.com/lllyasviel/Fooocus/discussions/679). After Fooocus 2.1.60, you will also have `run_anime.bat` and `run_realistic.bat`. They are different model presets (and require different models, but they will be automatically downloaded). [Check here for more details](https://github.com/lllyasviel/Fooocus/discussions/679).
After Fooocus 2.3.0, you can also switch presets directly in the browser. Keep in mind that you need to add these arguments if you want to change the default behavior:
* Use `--disable-preset-selection` to disable preset selection in the browser.
* Use `--always-download-new-model` to download missing models when switching presets. By default, Fooocus falls back to the `previous_default_models` defined in the corresponding preset; also see the terminal output and the sketch after this list.
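To make the fallback behavior concrete, here is a minimal illustrative sketch; the flag and helper names are assumptions, and the actual logic lives in `launch.download_models`, which appears later in the `webui.py` diff:

```python
import os

# Sketch of the described behavior, assuming an "always_download_new_model"
# flag; illustrative pseudologic, not the real launch.download_models.
def resolve_default_model(default_model, previous_default_models,
                          checkpoint_dir, always_download_new_model):
    if os.path.exists(os.path.join(checkpoint_dir, default_model)):
        return default_model                   # preset model already present
    if always_download_new_model:
        return default_model                   # download the preset's model
    for candidate in previous_default_models:  # fall back to an older default
        if os.path.exists(os.path.join(checkpoint_dir, candidate)):
            print(f"falling back to {candidate}, see terminal output")
            return candidate
    return default_model                       # nothing local, download anyway
```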
![image](https://github.com/lllyasviel/Fooocus/assets/19834515/d386f817-4bd7-490c-ad89-c1e228c23447) ![image](https://github.com/lllyasviel/Fooocus/assets/19834515/d386f817-4bd7-490c-ad89-c1e228c23447)
If you already have these files, you can copy them to the above locations to speed up installation. If you already have these files, you can copy them to the above locations to speed up installation.
@ -115,17 +119,21 @@ See also the common problems and troubleshoots [here](troubleshoot.md).
### Colab ### Colab
(Last tested - 2023 Dec 12) (Last tested - 2024 Mar 18 by [mashb1t](https://github.com/mashb1t))
| Colab | Info | Colab | Info
| --- | --- | | --- | --- |
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lllyasviel/Fooocus/blob/main/fooocus_colab.ipynb) | Fooocus Official [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lllyasviel/Fooocus/blob/main/fooocus_colab.ipynb) | Fooocus Official
In Colab, you can modify the last line to `!python entry_with_update.py --share` or `!python entry_with_update.py --preset anime --share` or `!python entry_with_update.py --preset realistic --share` for Fooocus Default/Anime/Realistic Edition. In Colab, you can modify the last line to `!python entry_with_update.py --share --always-high-vram` or `!python entry_with_update.py --share --always-high-vram --preset anime` or `!python entry_with_update.py --share --always-high-vram --preset realistic` for Fooocus Default/Anime/Realistic Edition.
You can also change the preset in the UI. Be aware that this may lead to timeouts after 60 seconds. If this happens, wait until the download has finished, then either switch the preset to `initial` and back to the one you selected, or reload the page.
Note that this Colab will disable refiner by default because Colab free's resources are relatively limited (and some "big" features like image prompt may cause free-tier Colab to disconnect). We make sure that basic text-to-image is always working on free-tier Colab. Note that this Colab will disable refiner by default because Colab free's resources are relatively limited (and some "big" features like image prompt may cause free-tier Colab to disconnect). We make sure that basic text-to-image is always working on free-tier Colab.
Thanks to [camenduru](https://github.com/camenduru)! Using `--always-high-vram` shifts resource allocation from RAM to VRAM and achieves the overall best balance between performance, flexibility and stability on the default T4 instance. Please find more information [here](https://github.com/lllyasviel/Fooocus/pull/1710#issuecomment-1989185346).
Thanks to [camenduru](https://github.com/camenduru) for the template!
### Linux (Using Anaconda) ### Linux (Using Anaconda)

Binary image file not shown (after: 8.4 KiB).

View File

@ -3,6 +3,10 @@
"name": "Fooocus Enhance", "name": "Fooocus Enhance",
"negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)" "negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)"
}, },
{
"name": "Fooocus Semi Realistic",
"negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)"
},
{ {
"name": "Fooocus Sharp", "name": "Fooocus Sharp",
"prompt": "cinematic still {prompt} . emotional, harmonious, vignette, 4k epic detailed, shot on kodak, 35mm photo, sharp focus, high budget, cinemascope, moody, epic, gorgeous, film grain, grainy", "prompt": "cinematic still {prompt} . emotional, harmonious, vignette, 4k epic detailed, shot on kodak, 35mm photo, sharp focus, high budget, cinemascope, moody, epic, gorgeous, film grain, grainy",

View File

@ -1,3 +1,14 @@
# [2.3.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.3.0)
* Add performance "Lightning" (based on [SDXL-Lightning 4 step LoRA](https://huggingface.co/ByteDance/SDXL-Lightning/blob/main/sdxl_lightning_4step_lora.safetensors)); see the sketch after this list
* Add preset selection to the UI; disable with argument `--disable-preset-selection`. Use `--always-download-new-model` to download missing models when switching presets.
* Improve face swap consistency by switching to the (synthetic) refiner later in the process
* Add temp path cleanup on startup
* Add support for wildcard subdirectories
* Add scrollable 2 column layout for styles for better structure
* Improve Colab resource usage for T4 instances (default), successfully tested with all image prompt features
* Improve anime preset: now uses style `Fooocus Semi Realistic` instead of `Fooocus Negative` (fewer wet-look images)
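The "Lightning" performance and several UI changes below rely on a `Performance` enum in `modules.flags`. A minimal sketch of what that enum might look like, reconstructed from the calls visible in the `webui.py` diff (`flags.Performance.list()`, `.has_restricted_features()`, `.EXTREME_SPEED.value`); the member set and internals are assumptions, not the actual source:

```python
from enum import Enum

class Performance(Enum):
    QUALITY = 'Quality'
    SPEED = 'Speed'
    EXTREME_SPEED = 'Extreme Speed'
    LIGHTNING = 'Lightning'

    @classmethod
    def list(cls) -> list:
        return [member.value for member in cls]

    @classmethod
    def has_restricted_features(cls, value) -> bool:
        # 'Extreme Speed' and 'Lightning' lock several advanced UI controls
        if isinstance(value, cls):
            value = value.value
        return value in [cls.EXTREME_SPEED.value, cls.LIGHTNING.value]
```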
# [2.2.1](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.1) # [2.2.1](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.1)
* Fix some small bugs (e.g. image grid, upscale fast 2x, LoRA weight width in Firefox) * Fix some small bugs (e.g. image grid, upscale fast 2x, LoRA weight width in Firefox)

webui.py (102 lines changed)
View File

@ -15,6 +15,7 @@ import modules.style_sorter as style_sorter
import modules.meta_parser import modules.meta_parser
import args_manager import args_manager
import copy import copy
import launch
from modules.sdxl_styles import legal_style_names from modules.sdxl_styles import legal_style_names
from modules.private_logger import get_current_html_path from modules.private_logger import get_current_html_path
@ -28,12 +29,16 @@ def get_task(*args):
return worker.AsyncTask(args=args) return worker.AsyncTask(args=args)
def generate_clicked(task): def generate_clicked(task: worker.AsyncTask):
import ldm_patched.modules.model_management as model_management import ldm_patched.modules.model_management as model_management
with model_management.interrupt_processing_mutex: with model_management.interrupt_processing_mutex:
model_management.interrupt_processing = False model_management.interrupt_processing = False
# outputs=[progress_html, progress_window, progress_gallery, gallery] # outputs=[progress_html, progress_window, progress_gallery, gallery]
if len(task.args) == 0:
return
execution_start_time = time.perf_counter() execution_start_time = time.perf_counter()
finished = False finished = False
@ -91,9 +96,7 @@ title = f'Fooocus {fooocus_version.version}'
if isinstance(args_manager.args.preset, str): if isinstance(args_manager.args.preset, str):
title += ' ' + args_manager.args.preset title += ' ' + args_manager.args.preset
shared.gradio_root = gr.Blocks( shared.gradio_root = gr.Blocks(title=title).queue()
title=title,
css=modules.html.css).queue()
with shared.gradio_root: with shared.gradio_root:
currentTask = gr.State(worker.AsyncTask(args=[])) currentTask = gr.State(worker.AsyncTask(args=[]))
@ -254,8 +257,13 @@ with shared.gradio_root:
with gr.Column(scale=1, visible=modules.config.default_advanced_checkbox) as advanced_column: with gr.Column(scale=1, visible=modules.config.default_advanced_checkbox) as advanced_column:
with gr.Tab(label='Setting'): with gr.Tab(label='Setting'):
if not args_manager.args.disable_preset_selection:
preset_selection = gr.Radio(label='Preset',
choices=modules.config.available_presets,
value=args_manager.args.preset if args_manager.args.preset else "initial",
interactive=True)
performance_selection = gr.Radio(label='Performance', performance_selection = gr.Radio(label='Performance',
choices=modules.flags.performance_selections, choices=flags.Performance.list(),
value=modules.config.default_performance) value=modules.config.default_performance)
aspect_ratios_selection = gr.Radio(label='Aspect Ratios', choices=modules.config.available_aspect_ratios, aspect_ratios_selection = gr.Radio(label='Aspect Ratios', choices=modules.config.available_aspect_ratios,
value=modules.config.default_aspect_ratio, info='width × height', value=modules.config.default_aspect_ratio, info='width × height',
@ -263,7 +271,7 @@ with shared.gradio_root:
image_number = gr.Slider(label='Image Number', minimum=1, maximum=modules.config.default_max_image_number, step=1, value=modules.config.default_image_number) image_number = gr.Slider(label='Image Number', minimum=1, maximum=modules.config.default_max_image_number, step=1, value=modules.config.default_image_number)
output_format = gr.Radio(label='Output Format', output_format = gr.Radio(label='Output Format',
choices=modules.flags.output_formats, choices=flags.OutputFormat.list(),
value=modules.config.default_output_format) value=modules.config.default_output_format)
negative_prompt = gr.Textbox(label='Negative Prompt', show_label=True, placeholder="Type prompt here.", negative_prompt = gr.Textbox(label='Negative Prompt', show_label=True, placeholder="Type prompt here.",
@ -300,7 +308,7 @@ with shared.gradio_root:
history_link = gr.HTML() history_link = gr.HTML()
shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False) shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False)
with gr.Tab(label='Style'): with gr.Tab(label='Style', elem_classes=['style_selections_tab']):
style_sorter.try_load_sorted_styles( style_sorter.try_load_sorted_styles(
style_names=legal_style_names, style_names=legal_style_names,
default_selected=modules.config.default_styles) default_selected=modules.config.default_styles)
@ -353,20 +361,20 @@ with shared.gradio_root:
with gr.Group(): with gr.Group():
lora_ctrls = [] lora_ctrls = []
for i, (n, v) in enumerate(modules.config.default_loras): for i, (enabled, filename, weight) in enumerate(modules.config.default_loras):
with gr.Row(): with gr.Row():
lora_enabled = gr.Checkbox(label='Enable', value=True, lora_enabled = gr.Checkbox(label='Enable', value=enabled,
elem_classes=['lora_enable', 'min_check'], scale=1) elem_classes=['lora_enable', 'min_check'], scale=1)
lora_model = gr.Dropdown(label=f'LoRA {i + 1}', lora_model = gr.Dropdown(label=f'LoRA {i + 1}',
choices=['None'] + modules.config.lora_filenames, value=n, choices=['None'] + modules.config.lora_filenames, value=filename,
elem_classes='lora_model', scale=5) elem_classes='lora_model', scale=5)
lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight, lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight,
maximum=modules.config.default_loras_max_weight, step=0.01, value=v, maximum=modules.config.default_loras_max_weight, step=0.01, value=weight,
elem_classes='lora_weight', scale=5) elem_classes='lora_weight', scale=5)
lora_ctrls += [lora_enabled, lora_model, lora_weight] lora_ctrls += [lora_enabled, lora_model, lora_weight]
with gr.Row(): with gr.Row():
model_refresh = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button') refresh_files = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button')
with gr.Tab(label='Advanced'): with gr.Tab(label='Advanced'):
guidance_scale = gr.Slider(label='Guidance Scale', minimum=1.0, maximum=30.0, step=0.01, guidance_scale = gr.Slider(label='Guidance Scale', minimum=1.0, maximum=30.0, step=0.01,
value=modules.config.default_cfg_scale, value=modules.config.default_cfg_scale,
@ -428,12 +436,13 @@ with shared.gradio_root:
disable_preview = gr.Checkbox(label='Disable Preview', value=False, disable_preview = gr.Checkbox(label='Disable Preview', value=False,
info='Disable preview during generation.') info='Disable preview during generation.')
disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results', disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results',
value=modules.config.default_performance == 'Extreme Speed', value=modules.config.default_performance == flags.Performance.EXTREME_SPEED.value,
interactive=modules.config.default_performance != 'Extreme Speed', interactive=modules.config.default_performance != flags.Performance.EXTREME_SPEED.value,
info='Disable intermediate results during generation, only show final gallery.') info='Disable intermediate results during generation, only show final gallery.')
disable_seed_increment = gr.Checkbox(label='Disable seed increment', disable_seed_increment = gr.Checkbox(label='Disable seed increment',
info='Disable automatic seed increment when image number is > 1.', info='Disable automatic seed increment when image number is > 1.',
value=False) value=False)
read_wildcards_in_order = gr.Checkbox(label="Read wildcards in order", value=False)
if not args_manager.args.disable_metadata: if not args_manager.args.disable_metadata:
save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images, save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images,
@ -512,24 +521,60 @@ with shared.gradio_root:
def dev_mode_checked(r): def dev_mode_checked(r):
return gr.update(visible=r) return gr.update(visible=r)
dev_mode.change(dev_mode_checked, inputs=[dev_mode], outputs=[dev_tools], dev_mode.change(dev_mode_checked, inputs=[dev_mode], outputs=[dev_tools],
queue=False, show_progress=False) queue=False, show_progress=False)
def model_refresh_clicked(): def refresh_files_clicked():
modules.config.update_all_model_names() modules.config.update_files()
results = [gr.update(choices=modules.config.model_filenames)] results = [gr.update(choices=modules.config.model_filenames)]
results += [gr.update(choices=['None'] + modules.config.model_filenames)] results += [gr.update(choices=['None'] + modules.config.model_filenames)]
if not args_manager.args.disable_preset_selection:
results += [gr.update(choices=modules.config.available_presets)]
for i in range(modules.config.default_max_lora_number): for i in range(modules.config.default_max_lora_number):
results += [gr.update(interactive=True), gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()] results += [gr.update(interactive=True),
gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
return results return results
model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls, refresh_files_output = [base_model, refiner_model]
if not args_manager.args.disable_preset_selection:
refresh_files_output += [preset_selection]
refresh_files.click(refresh_files_clicked, [], refresh_files_output + lora_ctrls,
queue=False, show_progress=False) queue=False, show_progress=False)
performance_selection.change(lambda x: [gr.update(interactive=x != 'Extreme Speed')] * 11 + state_is_generating = gr.State(False)
[gr.update(visible=x != 'Extreme Speed')] * 1 +
[gr.update(interactive=x != 'Extreme Speed', value=x == 'Extreme Speed', )] * 1, load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections,
performance_selection, overwrite_step, overwrite_switch, aspect_ratios_selection,
overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive,
adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, base_model,
refiner_model, refiner_switch, sampler_name, scheduler_name, seed_random, image_seed,
generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls
if not args_manager.args.disable_preset_selection:
def preset_selection_change(preset, is_generating):
preset_content = modules.config.try_get_preset_content(preset) if preset != 'initial' else {}
preset_prepared = modules.meta_parser.parse_meta_from_preset(preset_content)
default_model = preset_prepared.get('base_model')
previous_default_models = preset_prepared.get('previous_default_models', [])
checkpoint_downloads = preset_prepared.get('checkpoint_downloads', {})
embeddings_downloads = preset_prepared.get('embeddings_downloads', {})
lora_downloads = preset_prepared.get('lora_downloads', {})
preset_prepared['base_model'], preset_prepared['lora_downloads'] = launch.download_models(
default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads)
if 'prompt' in preset_prepared and preset_prepared.get('prompt') == '':
del preset_prepared['prompt']
return modules.meta_parser.load_parameter_button_click(json.dumps(preset_prepared), is_generating)
preset_selection.change(preset_selection_change, inputs=[preset_selection, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
.then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \
performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 +
[gr.update(visible=not flags.Performance.has_restricted_features(x))] * 1 +
[gr.update(interactive=not flags.Performance.has_restricted_features(x), value=flags.Performance.has_restricted_features(x))] * 1,
inputs=performance_selection, inputs=performance_selection,
outputs=[ outputs=[
guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive, guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive,
@ -579,7 +624,8 @@ with shared.gradio_root:
ctrls = [currentTask, generate_image_grid] ctrls = [currentTask, generate_image_grid]
ctrls += [ ctrls += [
prompt, negative_prompt, style_selections, prompt, negative_prompt, style_selections,
performance_selection, aspect_ratios_selection, image_number, output_format, image_seed, sharpness, guidance_scale performance_selection, aspect_ratios_selection, image_number, output_format, image_seed,
read_wildcards_in_order, sharpness, guidance_scale
] ]
ctrls += [base_model, refiner_model, refiner_switch] + lora_ctrls ctrls += [base_model, refiner_model, refiner_switch] + lora_ctrls
@ -601,8 +647,6 @@ with shared.gradio_root:
ctrls += ip_ctrls ctrls += ip_ctrls
state_is_generating = gr.State(False)
def parse_meta(raw_prompt_txt, is_generating): def parse_meta(raw_prompt_txt, is_generating):
loaded_json = None loaded_json = None
if is_json(raw_prompt_txt): if is_json(raw_prompt_txt):
@ -618,13 +662,6 @@ with shared.gradio_root:
prompt.input(parse_meta, inputs=[prompt, state_is_generating], outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False) prompt.input(parse_meta, inputs=[prompt, state_is_generating], outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False)
load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections,
performance_selection, overwrite_step, overwrite_switch, aspect_ratios_selection,
overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive,
adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, base_model,
refiner_model, refiner_switch, sampler_name, scheduler_name, seed_random, image_seed,
generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls
load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False) load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False)
def trigger_metadata_import(filepath, state_is_generating): def trigger_metadata_import(filepath, state_is_generating):
@ -638,7 +675,6 @@ with shared.gradio_root:
return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating) return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating)
metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \ metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
.then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) .then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False)