commit a1bda88aa3

@@ -4,7 +4,10 @@ import os
from tempfile import gettempdir

args_parser.parser.add_argument("--share", action='store_true', help="Set whether to share on Gradio.")

args_parser.parser.add_argument("--preset", type=str, default=None, help="Apply specified UI preset.")
args_parser.parser.add_argument("--disable-preset-selection", action='store_true',
                                help="Disables preset selection in Gradio.")

args_parser.parser.add_argument("--language", type=str, default='default',
                                help="Translate UI using json files in [language] folder. "
@@ -49,7 +52,4 @@ if args_parser.args.disable_analytics:
if args_parser.args.disable_in_browser:
    args_parser.args.in_browser = False

if args_parser.args.temp_path is None:
    args_parser.args.temp_path = os.path.join(gettempdir(), 'Fooocus')

args = args_parser.args
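The two new arguments are plain argparse flags. A minimal, self-contained sketch of how they behave once parsed (a standalone parser is shown for illustration; in Fooocus they are registered on `args_parser.parser`):

import argparse

# Hypothetical standalone parser mirroring the flags added above.
parser = argparse.ArgumentParser()
parser.add_argument("--preset", type=str, default=None, help="Apply specified UI preset.")
parser.add_argument("--disable-preset-selection", action='store_true',
                    help="Disables preset selection in Gradio.")

args = parser.parse_args(["--preset", "realistic"])
print(args.preset)                    # 'realistic'
print(args.disable_preset_selection)  # False (flag not passed)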
css/style.css
@ -1,5 +1,136 @@
|
||||
/* based on https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/v1.6.0/style.css */
|
||||
|
||||
.loader-container {
|
||||
display: flex; /* Use flex to align items horizontally */
|
||||
align-items: center; /* Center items vertically within the container */
|
||||
white-space: nowrap; /* Prevent line breaks within the container */
|
||||
}
|
||||
|
||||
.loader {
|
||||
border: 8px solid #f3f3f3; /* Light grey */
|
||||
border-top: 8px solid #3498db; /* Blue */
|
||||
border-radius: 50%;
|
||||
width: 30px;
|
||||
height: 30px;
|
||||
animation: spin 2s linear infinite;
|
||||
}
|
||||
|
||||
@keyframes spin {
|
||||
0% { transform: rotate(0deg); }
|
||||
100% { transform: rotate(360deg); }
|
||||
}
|
||||
|
||||
/* Style the progress bar */
|
||||
progress {
|
||||
appearance: none; /* Remove default styling */
|
||||
height: 20px; /* Set the height of the progress bar */
|
||||
border-radius: 5px; /* Round the corners of the progress bar */
|
||||
background-color: #f3f3f3; /* Light grey background */
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
/* Style the progress bar container */
|
||||
.progress-container {
|
||||
margin-left: 20px;
|
||||
margin-right: 20px;
|
||||
flex-grow: 1; /* Allow the progress container to take up remaining space */
|
||||
}
|
||||
|
||||
/* Set the color of the progress bar fill */
|
||||
progress::-webkit-progress-value {
|
||||
background-color: #3498db; /* Blue color for the fill */
|
||||
}
|
||||
|
||||
progress::-moz-progress-bar {
|
||||
background-color: #3498db; /* Blue color for the fill in Firefox */
|
||||
}
|
||||
|
||||
/* Style the text on the progress bar */
|
||||
progress::after {
|
||||
content: attr(value '%'); /* Display the progress value followed by '%' */
|
||||
position: absolute;
|
||||
top: 50%;
|
||||
left: 50%;
|
||||
transform: translate(-50%, -50%);
|
||||
color: white; /* Set text color */
|
||||
font-size: 14px; /* Set font size */
|
||||
}
|
||||
|
||||
/* Style other texts */
|
||||
.loader-container > span {
|
||||
margin-left: 5px; /* Add spacing between the progress bar and the text */
|
||||
}
|
||||
|
||||
.progress-bar > .generating {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
.progress-bar{
|
||||
height: 30px !important;
|
||||
}
|
||||
|
||||
.type_row{
|
||||
height: 80px !important;
|
||||
}
|
||||
|
||||
.type_row_half{
|
||||
height: 32px !important;
|
||||
}
|
||||
|
||||
.scroll-hide{
|
||||
resize: none !important;
|
||||
}
|
||||
|
||||
.refresh_button{
|
||||
border: none !important;
|
||||
background: none !important;
|
||||
font-size: none !important;
|
||||
box-shadow: none !important;
|
||||
}
|
||||
|
||||
.advanced_check_row{
|
||||
width: 250px !important;
|
||||
}
|
||||
|
||||
.min_check{
|
||||
min-width: min(1px, 100%) !important;
|
||||
}
|
||||
|
||||
.resizable_area {
|
||||
resize: vertical;
|
||||
overflow: auto !important;
|
||||
}
|
||||
|
||||
.aspect_ratios label {
|
||||
width: 140px !important;
|
||||
}
|
||||
|
||||
.aspect_ratios label span {
|
||||
white-space: nowrap !important;
|
||||
}
|
||||
|
||||
.aspect_ratios label input {
|
||||
margin-left: -5px !important;
|
||||
}
|
||||
|
||||
.lora_enable label {
|
||||
height: 100%;
|
||||
}
|
||||
|
||||
.lora_enable label input {
|
||||
margin: auto;
|
||||
}
|
||||
|
||||
.lora_enable label span {
|
||||
display: none;
|
||||
}
|
||||
|
||||
@-moz-document url-prefix() {
|
||||
.lora_weight input[type=number] {
|
||||
width: 80px;
|
||||
}
|
||||
}
|
||||
|
||||
#context-menu{
|
||||
z-index:9999;
|
||||
position:absolute;
|
||||
@ -218,3 +349,48 @@
|
||||
#stylePreviewOverlay.lower-half {
|
||||
transform: translate(-140px, -140px);
|
||||
}
|
||||
|
||||
/* scrollable box for style selections */
|
||||
.contain .tabs {
|
||||
height: 100%;
|
||||
}
|
||||
|
||||
.contain .tabs .tabitem.style_selections_tab {
|
||||
height: 100%;
|
||||
}
|
||||
|
||||
.contain .tabs .tabitem.style_selections_tab > div:first-child {
|
||||
height: 100%;
|
||||
}
|
||||
|
||||
.contain .tabs .tabitem.style_selections_tab .style_selections {
|
||||
min-height: 200px;
|
||||
height: 100%;
|
||||
}
|
||||
|
||||
.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] {
|
||||
position: absolute; /* remove this to disable scrolling within the checkbox-group */
|
||||
overflow: auto;
|
||||
padding-right: 2px;
|
||||
max-height: 100%;
|
||||
}
|
||||
|
||||
.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] label {
|
||||
/* max-width: calc(35% - 15px) !important; */ /* add this to enable 3 columns layout */
|
||||
flex: calc(50% - 5px) !important;
|
||||
}
|
||||
|
||||
.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] label span {
|
||||
/* white-space:nowrap; */ /* add this to disable text wrapping (better choice for 3 columns layout) */
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
}
|
||||
|
||||
/* styles preview tooltip */
|
||||
.preview-tooltip {
|
||||
background-color: #fff8;
|
||||
font-family: monospace;
|
||||
text-align: center;
|
||||
border-radius-top: 5px;
|
||||
display: none; /* remove this to enable tooltip in preview image */
|
||||
}
|
@@ -12,7 +12,7 @@
    "%cd /content\n",
    "!git clone https://github.com/lllyasviel/Fooocus.git\n",
    "%cd /content/Fooocus\n",
    "!python entry_with_update.py --share\n"
    "!python entry_with_update.py --share --always-high-vram\n"
   ]
  }
 ],

@@ -1 +1 @@
version = '2.2.1'
version = '2.3.0'
@@ -150,9 +150,12 @@ function initStylePreviewOverlay() {
    let overlayVisible = false;
    const samplesPath = document.querySelector("meta[name='samples-path']").getAttribute("content")
    const overlay = document.createElement('div');
    const tooltip = document.createElement('div');
    tooltip.className = 'preview-tooltip';
    overlay.appendChild(tooltip);
    overlay.id = 'stylePreviewOverlay';
    document.body.appendChild(overlay);
    document.addEventListener('mouseover', function(e) {
    document.addEventListener('mouseover', function (e) {
        const label = e.target.closest('.style_selections label');
        if (!label) return;
        label.removeEventListener("mouseout", onMouseLeave);
@@ -162,9 +165,12 @@ function initStylePreviewOverlay() {
        const originalText = label.querySelector("span").getAttribute("data-original-text");
        const name = originalText || label.querySelector("span").textContent;
        overlay.style.backgroundImage = `url("${samplesPath.replace(
            "fooocus_v2",
            name.toLowerCase().replaceAll(" ", "_")
            "fooocus_v2",
            name.toLowerCase().replaceAll(" ", "_")
        ).replaceAll("\\", "\\\\")}")`;

        tooltip.textContent = name;

        function onMouseLeave() {
            overlayVisible = false;
            overlay.style.opacity = "0";
@@ -172,8 +178,8 @@ function initStylePreviewOverlay() {
            label.removeEventListener("mouseout", onMouseLeave);
        }
    });
    document.addEventListener('mousemove', function(e) {
        if(!overlayVisible) return;
    document.addEventListener('mousemove', function (e) {
        if (!overlayVisible) return;
        overlay.style.left = `${e.clientX}px`;
        overlay.style.top = `${e.clientY}px`;
        overlay.className = e.clientY > window.innerHeight / 2 ? "lower-half" : "upper-half";
@@ -38,9 +38,12 @@
    "* \"Inpaint or Outpaint\" is powered by the sampler \"DPMPP Fooocus Seamless 2M SDE Karras Inpaint Sampler\" (beta)": "* \"Inpaint or Outpaint\" is powered by the sampler \"DPMPP Fooocus Seamless 2M SDE Karras Inpaint Sampler\" (beta)",
    "Setting": "Setting",
    "Style": "Style",
    "Preset": "Preset",
    "Performance": "Performance",
    "Speed": "Speed",
    "Quality": "Quality",
    "Extreme Speed": "Extreme Speed",
    "Lightning": "Lightning",
    "Aspect Ratios": "Aspect Ratios",
    "width \u00d7 height": "width \u00d7 height",
    "Image Number": "Image Number",
@@ -50,6 +53,7 @@
    "Seed": "Seed",
    "Disable seed increment": "Disable seed increment",
    "Disable automatic seed increment when image number is > 1.": "Disable automatic seed increment when image number is > 1.",
    "Read wildcards in order": "Read wildcards in order",
    "\ud83d\udcda History Log": "\uD83D\uDCDA History Log",
    "Image Style": "Image Style",
    "Fooocus V2": "Fooocus V2",
@@ -367,7 +371,6 @@
    "B2": "B2",
    "S1": "S1",
    "S2": "S2",
    "Extreme Speed": "Extreme Speed",
    "\uD83D\uDD0E Type here to search styles ...": "\uD83D\uDD0E Type here to search styles ...",
    "Type prompt here.": "Type prompt here.",
    "Outpaint Expansion Direction:": "Outpaint Expansion Direction:",
@@ -381,5 +384,6 @@
    "Metadata Scheme": "Metadata Scheme",
    "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.",
    "fooocus (json)": "fooocus (json)",
    "a1111 (plain text)": "a1111 (plain text)"
    "a1111 (plain text)": "a1111 (plain text)",
    "Unsupported image type in input": "Unsupported image type in input"
}
launch.py
@@ -1,6 +1,6 @@
import os
import sys
import ssl
import sys

print('[System ARGV] ' + str(sys.argv))

@@ -15,15 +15,13 @@ if "GRADIO_SERVER_PORT" not in os.environ:

ssl._create_default_https_context = ssl._create_unverified_context


import platform
import fooocus_version

from build_launcher import build_launcher
from modules.launch_util import is_installed, run, python, run_pip, requirements_met
from modules.launch_util import is_installed, run, python, run_pip, requirements_met, delete_folder_content
from modules.model_loader import load_file_from_url


REINSTALL_ALL = False
TRY_INSTALL_XFORMERS = False

@@ -68,6 +66,7 @@ vae_approx_filenames = [
     'https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors')
]


def ini_args():
    from args_manager import args
    return args
@@ -77,15 +76,24 @@ prepare_environment()
build_launcher()
args = ini_args()


if args.gpu_device_id is not None:
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_device_id)
    print("Set device to:", args.gpu_device_id)


from modules import config

def download_models():
os.environ['GRADIO_TEMP_DIR'] = config.temp_path

if config.temp_path_cleanup_on_launch:
    print(f'[Cleanup] Attempting to delete content of temp dir {config.temp_path}')
    result = delete_folder_content(config.temp_path, '[Cleanup] ')
    if result:
        print("[Cleanup] Cleanup successful")
    else:
        print(f"[Cleanup] Failed to delete content of temp dir.")


def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads):
    for file_name, url in vae_approx_filenames:
        load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name)

@@ -97,31 +105,32 @@ def download_models():

    if args.disable_preset_download:
        print('Skipped model download.')
        return
        return default_model, checkpoint_downloads

    if not args.always_download_new_model:
        if not os.path.exists(os.path.join(config.paths_checkpoints[0], config.default_base_model_name)):
            for alternative_model_name in config.previous_default_models:
        if not os.path.exists(os.path.join(config.paths_checkpoints[0], default_model)):
            for alternative_model_name in previous_default_models:
                if os.path.exists(os.path.join(config.paths_checkpoints[0], alternative_model_name)):
                    print(f'You do not have [{config.default_base_model_name}] but you have [{alternative_model_name}].')
                    print(f'You do not have [{default_model}] but you have [{alternative_model_name}].')
                    print(f'Fooocus will use [{alternative_model_name}] to avoid downloading new models, '
                          f'but you are not using latest models.')
                          f'but you are not using the latest models.')
                    print('Use --always-download-new-model to avoid fallback and always get new models.')
                    config.checkpoint_downloads = {}
                    config.default_base_model_name = alternative_model_name
                    checkpoint_downloads = {}
                    default_model = alternative_model_name
                    break

    for file_name, url in config.checkpoint_downloads.items():
    for file_name, url in checkpoint_downloads.items():
        load_file_from_url(url=url, model_dir=config.paths_checkpoints[0], file_name=file_name)
    for file_name, url in config.embeddings_downloads.items():
    for file_name, url in embeddings_downloads.items():
        load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name)
    for file_name, url in config.lora_downloads.items():
    for file_name, url in lora_downloads.items():
        load_file_from_url(url=url, model_dir=config.paths_loras[0], file_name=file_name)

    return
    return default_model, checkpoint_downloads


download_models()

config.default_base_model_name, config.checkpoint_downloads = download_models(
    config.default_base_model_name, config.previous_default_models, config.checkpoint_downloads,
    config.embeddings_downloads, config.lora_downloads)

from webui import *
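The net effect of the launch.py changes is that download_models no longer mutates modules.config directly; it receives the defaults as arguments and returns the (possibly substituted) model name plus download list, which the caller writes back. A hedged sketch of just the fallback rule, with invented names (not the exact Fooocus code):

import os

def pick_default_model(default_model, previous_default_models, checkpoint_downloads, checkpoint_dir):
    # If the preferred checkpoint is missing, reuse an older default already on disk
    # and clear the download list instead of fetching a new file.
    if not os.path.exists(os.path.join(checkpoint_dir, default_model)):
        for alternative in previous_default_models:
            if os.path.exists(os.path.join(checkpoint_dir, alternative)):
                return alternative, {}
    return default_model, checkpoint_downloads

# Caller pattern, mirroring launch.py:
# config.default_base_model_name, config.checkpoint_downloads = download_models(...)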
@@ -1,4 +1,5 @@
import threading
import re
from modules.patch import PatchSettings, patch_settings, patch_all

patch_all()
@@ -45,8 +46,8 @@ def worker():
    from modules.sdxl_styles import apply_style, apply_wildcards, fooocus_expansion, apply_arrays
    from modules.private_logger import log
    from extras.expansion import safe_str
    from modules.util import remove_empty_str, HWC3, resize_image, \
        get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate, ordinal_suffix
    from modules.util import remove_empty_str, HWC3, resize_image, get_image_shape_ceil, set_image_shape_ceil, \
        get_shape_ceil, resample_image, erode_or_dilate, ordinal_suffix, get_enabled_loras
    from modules.upscaler import perform_upscale
    from modules.flags import Performance
    from modules.meta_parser import get_metadata_parser, MetadataScheme
@@ -123,14 +124,6 @@ def worker():
        async_task.results = async_task.results + [wall]
        return

    def apply_enabled_loras(loras):
        enabled_loras = []
        for lora_enabled, lora_model, lora_weight in loras:
            if lora_enabled:
                enabled_loras.append([lora_model, lora_weight])

        return enabled_loras

    @torch.no_grad()
    @torch.inference_mode()
    def handler(async_task):
@@ -148,12 +141,13 @@ def worker():
        image_number = args.pop()
        output_format = args.pop()
        image_seed = args.pop()
        read_wildcards_in_order = args.pop()
        sharpness = args.pop()
        guidance_scale = args.pop()
        base_model_name = args.pop()
        refiner_model_name = args.pop()
        refiner_switch = args.pop()
        loras = apply_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop()), ] for _ in range(modules.config.default_max_lora_number)])
        loras = get_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop())] for _ in range(modules.config.default_max_lora_number)])
        input_image_checkbox = args.pop()
        current_tab = args.pop()
        uov_method = args.pop()
@@ -250,6 +244,25 @@ def worker():
            adm_scaler_negative = 1.0
            adm_scaler_end = 0.0

        elif performance_selection == Performance.LIGHTNING:
            print('Enter Lightning mode.')
            progressbar(async_task, 1, 'Downloading Lightning components ...')
            loras += [(modules.config.downloading_sdxl_lightning_lora(), 1.0)]

            if refiner_model_name != 'None':
                print(f'Refiner disabled in Lightning mode.')

            refiner_model_name = 'None'
            sampler_name = 'euler'
            scheduler_name = 'sgm_uniform'
            sharpness = 0.0
            guidance_scale = 1.0
            adaptive_cfg = 1.0
            refiner_switch = 1.0
            adm_scaler_positive = 1.0
            adm_scaler_negative = 1.0
            adm_scaler_end = 0.0

        print(f'[Parameters] Adaptive CFG = {adaptive_cfg}')
        print(f'[Parameters] Sharpness = {sharpness}')
        print(f'[Parameters] ControlNet Softness = {controlnet_softness}')
@@ -347,7 +360,7 @@ def worker():
            print(f'[Inpaint] Current inpaint model is {inpaint_patch_model_path}')
            if refiner_model_name == 'None':
                use_synthetic_refiner = True
                refiner_switch = 0.5
                refiner_switch = 0.8
        else:
            inpaint_head_model_path, inpaint_patch_model_path = None, None
            print(f'[Inpaint] Parameterized inpaint is disabled.')
@@ -422,16 +435,16 @@ def worker():

        for i in range(image_number):
            if disable_seed_increment:
                task_seed = seed
                task_seed = seed % (constants.MAX_SEED + 1)
            else:
                task_seed = (seed + i) % (constants.MAX_SEED + 1)  # randint is inclusive, % is not

            task_rng = random.Random(task_seed)  # may bind to inpaint noise in the future
            task_prompt = apply_wildcards(prompt, task_rng)
            task_prompt = apply_wildcards(prompt, task_rng, i, read_wildcards_in_order)
            task_prompt = apply_arrays(task_prompt, i)
            task_negative_prompt = apply_wildcards(negative_prompt, task_rng)
            task_extra_positive_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_positive_prompts]
            task_extra_negative_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_negative_prompts]
            task_negative_prompt = apply_wildcards(negative_prompt, task_rng, i, read_wildcards_in_order)
            task_extra_positive_prompts = [apply_wildcards(pmt, task_rng, i, read_wildcards_in_order) for pmt in extra_positive_prompts]
            task_extra_negative_prompts = [apply_wildcards(pmt, task_rng, i, read_wildcards_in_order) for pmt in extra_negative_prompts]

            positive_basic_workloads = []
            negative_basic_workloads = []
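The Lightning branch above only swaps the sampler, scheduler and guidance settings; the step count itself comes from the enums added in modules/flags.py. A condensed, illustrative version of that mapping (the real class also exposes steps_uov() and has_restricted_features()):

from enum import Enum, IntEnum

class Steps(IntEnum):          # sampling steps per performance mode
    QUALITY = 60
    SPEED = 30
    EXTREME_SPEED = 8
    LIGHTNING = 4

class Performance(Enum):
    QUALITY = 'Quality'
    SPEED = 'Speed'
    EXTREME_SPEED = 'Extreme Speed'
    LIGHTNING = 'Lightning'

    def steps(self) -> int:
        # Members share names across the two enums, so a lookup by name is enough.
        return Steps[self.name].value

print(Performance.LIGHTNING.steps())  # 4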
@ -3,12 +3,14 @@ import json
|
||||
import math
|
||||
import numbers
|
||||
import args_manager
|
||||
import tempfile
|
||||
import modules.flags
|
||||
import modules.sdxl_styles
|
||||
|
||||
from modules.model_loader import load_file_from_url
|
||||
from modules.util import get_files_from_folder, makedirs_with_log
|
||||
from modules.flags import Performance, MetadataScheme
|
||||
from modules.flags import OutputFormat, Performance, MetadataScheme
|
||||
|
||||
|
||||
def get_config_path(key, default_value):
|
||||
env = os.getenv(key)
|
||||
@ -18,6 +20,7 @@ def get_config_path(key, default_value):
|
||||
else:
|
||||
return os.path.abspath(default_value)
|
||||
|
||||
|
||||
config_path = get_config_path('config_path', "./config.txt")
|
||||
config_example_path = get_config_path('config_example_path', "config_modification_tutorial.txt")
|
||||
config_dict = {}
|
||||
@ -94,21 +97,44 @@ def try_load_deprecated_user_path_config():
|
||||
|
||||
try_load_deprecated_user_path_config()
|
||||
|
||||
|
||||
def get_presets():
|
||||
preset_folder = 'presets'
|
||||
presets = ['initial']
|
||||
if not os.path.exists(preset_folder):
|
||||
print('No presets found.')
|
||||
return presets
|
||||
|
||||
return presets + [f[:f.index('.json')] for f in os.listdir(preset_folder) if f.endswith('.json')]
|
||||
|
||||
|
||||
def try_get_preset_content(preset):
|
||||
if isinstance(preset, str):
|
||||
preset_path = os.path.abspath(f'./presets/{preset}.json')
|
||||
try:
|
||||
if os.path.exists(preset_path):
|
||||
with open(preset_path, "r", encoding="utf-8") as json_file:
|
||||
json_content = json.load(json_file)
|
||||
print(f'Loaded preset: {preset_path}')
|
||||
return json_content
|
||||
else:
|
||||
raise FileNotFoundError
|
||||
except Exception as e:
|
||||
print(f'Load preset [{preset_path}] failed')
|
||||
print(e)
|
||||
return {}
|
||||
|
||||
|
||||
try:
|
||||
with open(os.path.abspath(f'./presets/default.json'), "r", encoding="utf-8") as json_file:
|
||||
config_dict.update(json.load(json_file))
|
||||
except Exception as e:
|
||||
print(f'Load default preset failed.')
|
||||
print(e)
|
||||
|
||||
available_presets = get_presets()
|
||||
preset = args_manager.args.preset
|
||||
|
||||
if isinstance(preset, str):
|
||||
preset_path = os.path.abspath(f'./presets/{preset}.json')
|
||||
try:
|
||||
if os.path.exists(preset_path):
|
||||
with open(preset_path, "r", encoding="utf-8") as json_file:
|
||||
config_dict.update(json.load(json_file))
|
||||
print(f'Loaded preset: {preset_path}')
|
||||
else:
|
||||
raise FileNotFoundError
|
||||
except Exception as e:
|
||||
print(f'Load preset [{preset_path}] failed')
|
||||
print(e)
|
||||
|
||||
config_dict.update(try_get_preset_content(preset))
|
||||
|
||||
def get_path_output() -> str:
|
||||
"""
|
||||
@ -117,7 +143,7 @@ def get_path_output() -> str:
|
||||
global config_dict
|
||||
path_output = get_dir_or_set_default('path_outputs', '../outputs/', make_directory=True)
|
||||
if args_manager.args.output_path:
|
||||
print(f'[CONFIG] Overriding config value path_outputs with {args_manager.args.output_path}')
|
||||
print(f'Overriding config value path_outputs with {args_manager.args.output_path}')
|
||||
config_dict['path_outputs'] = path_output = args_manager.args.output_path
|
||||
return path_output
|
||||
|
||||
@ -176,8 +202,10 @@ path_inpaint = get_dir_or_set_default('path_inpaint', '../models/inpaint/')
|
||||
path_controlnet = get_dir_or_set_default('path_controlnet', '../models/controlnet/')
|
||||
path_clip_vision = get_dir_or_set_default('path_clip_vision', '../models/clip_vision/')
|
||||
path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion')
|
||||
path_wildcards = get_dir_or_set_default('path_wildcards', '../wildcards/')
|
||||
path_outputs = get_path_output()
|
||||
|
||||
|
||||
def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False):
|
||||
global config_dict, visited_keys
|
||||
|
||||
@ -206,7 +234,37 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_
|
||||
return default_value
|
||||
|
||||
|
||||
default_base_model_name = get_config_item_or_set_default(
|
||||
def init_temp_path(path: str | None, default_path: str) -> str:
|
||||
if args_manager.args.temp_path:
|
||||
path = args_manager.args.temp_path
|
||||
|
||||
if path != '' and path != default_path:
|
||||
try:
|
||||
if not os.path.isabs(path):
|
||||
path = os.path.abspath(path)
|
||||
os.makedirs(path, exist_ok=True)
|
||||
print(f'Using temp path {path}')
|
||||
return path
|
||||
except Exception as e:
|
||||
print(f'Could not create temp path {path}. Reason: {e}')
|
||||
print(f'Using default temp path {default_path} instead.')
|
||||
|
||||
os.makedirs(default_path, exist_ok=True)
|
||||
return default_path
|
||||
|
||||
|
||||
default_temp_path = os.path.join(tempfile.gettempdir(), 'fooocus')
|
||||
temp_path = init_temp_path(get_config_item_or_set_default(
|
||||
key='temp_path',
|
||||
default_value=default_temp_path,
|
||||
validator=lambda x: isinstance(x, str),
|
||||
), default_temp_path)
|
||||
temp_path_cleanup_on_launch = get_config_item_or_set_default(
|
||||
key='temp_path_cleanup_on_launch',
|
||||
default_value=True,
|
||||
validator=lambda x: isinstance(x, bool)
|
||||
)
|
||||
default_base_model_name = default_model = get_config_item_or_set_default(
|
||||
key='default_model',
|
||||
default_value='model.safetensors',
|
||||
validator=lambda x: isinstance(x, str)
|
||||
@ -216,7 +274,7 @@ previous_default_models = get_config_item_or_set_default(
|
||||
default_value=[],
|
||||
validator=lambda x: isinstance(x, list) and all(isinstance(k, str) for k in x)
|
||||
)
|
||||
default_refiner_model_name = get_config_item_or_set_default(
|
||||
default_refiner_model_name = default_refiner = get_config_item_or_set_default(
|
||||
key='default_refiner',
|
||||
default_value='None',
|
||||
validator=lambda x: isinstance(x, str)
|
||||
@ -240,28 +298,37 @@ default_loras = get_config_item_or_set_default(
|
||||
key='default_loras',
|
||||
default_value=[
|
||||
[
|
||||
True,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
True,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
True,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
True,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
True,
|
||||
"None",
|
||||
1.0
|
||||
]
|
||||
],
|
||||
validator=lambda x: isinstance(x, list) and all(len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number) for y in x)
|
||||
validator=lambda x: isinstance(x, list) and all(
|
||||
len(y) == 3 and isinstance(y[0], bool) and isinstance(y[1], str) and isinstance(y[2], numbers.Number)
|
||||
or len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number)
|
||||
for y in x)
|
||||
)
|
||||
default_loras = [(y[0], y[1], y[2]) if len(y) == 3 else (True, y[0], y[1]) for y in default_loras]
|
||||
default_max_lora_number = get_config_item_or_set_default(
|
||||
key='default_max_lora_number',
|
||||
default_value=len(default_loras) if isinstance(default_loras, list) and len(default_loras) > 0 else 5,
|
||||
@ -326,7 +393,7 @@ default_max_image_number = get_config_item_or_set_default(
|
||||
default_output_format = get_config_item_or_set_default(
|
||||
key='default_output_format',
|
||||
default_value='png',
|
||||
validator=lambda x: x in modules.flags.output_formats
|
||||
validator=lambda x: x in OutputFormat.list()
|
||||
)
|
||||
default_image_number = get_config_item_or_set_default(
|
||||
key='default_image_number',
|
||||
@ -411,29 +478,30 @@ example_inpaint_prompts = [[x] for x in example_inpaint_prompts]
|
||||
|
||||
config_dict["default_loras"] = default_loras = default_loras[:default_max_lora_number] + [['None', 1.0] for _ in range(default_max_lora_number - len(default_loras))]
|
||||
|
||||
possible_preset_keys = [
|
||||
"default_model",
|
||||
"default_refiner",
|
||||
"default_refiner_switch",
|
||||
"default_loras_min_weight",
|
||||
"default_loras_max_weight",
|
||||
"default_loras",
|
||||
"default_max_lora_number",
|
||||
"default_cfg_scale",
|
||||
"default_sample_sharpness",
|
||||
"default_sampler",
|
||||
"default_scheduler",
|
||||
"default_performance",
|
||||
"default_prompt",
|
||||
"default_prompt_negative",
|
||||
"default_styles",
|
||||
"default_aspect_ratio",
|
||||
"default_save_metadata_to_images",
|
||||
"checkpoint_downloads",
|
||||
"embeddings_downloads",
|
||||
"lora_downloads",
|
||||
]
|
||||
|
||||
# mapping config to meta parameter
|
||||
possible_preset_keys = {
|
||||
"default_model": "base_model",
|
||||
"default_refiner": "refiner_model",
|
||||
"default_refiner_switch": "refiner_switch",
|
||||
"previous_default_models": "previous_default_models",
|
||||
"default_loras_min_weight": "default_loras_min_weight",
|
||||
"default_loras_max_weight": "default_loras_max_weight",
|
||||
"default_loras": "<processed>",
|
||||
"default_cfg_scale": "guidance_scale",
|
||||
"default_sample_sharpness": "sharpness",
|
||||
"default_sampler": "sampler",
|
||||
"default_scheduler": "scheduler",
|
||||
"default_overwrite_step": "steps",
|
||||
"default_performance": "performance",
|
||||
"default_prompt": "prompt",
|
||||
"default_prompt_negative": "negative_prompt",
|
||||
"default_styles": "styles",
|
||||
"default_aspect_ratio": "resolution",
|
||||
"default_save_metadata_to_images": "default_save_metadata_to_images",
|
||||
"checkpoint_downloads": "checkpoint_downloads",
|
||||
"embeddings_downloads": "embeddings_downloads",
|
||||
"lora_downloads": "lora_downloads"
|
||||
}
|
||||
|
||||
REWRITE_PRESET = False
|
||||
|
||||
@ -474,21 +542,27 @@ with open(config_example_path, "w", encoding="utf-8") as json_file:
|
||||
|
||||
model_filenames = []
|
||||
lora_filenames = []
|
||||
wildcard_filenames = []
|
||||
|
||||
sdxl_lcm_lora = 'sdxl_lcm_lora.safetensors'
|
||||
sdxl_lightning_lora = 'sdxl_lightning_4step_lora.safetensors'
|
||||
|
||||
|
||||
def get_model_filenames(folder_paths, name_filter=None):
|
||||
extensions = ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch']
|
||||
def get_model_filenames(folder_paths, extensions=None, name_filter=None):
|
||||
if extensions is None:
|
||||
extensions = ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch']
|
||||
files = []
|
||||
for folder in folder_paths:
|
||||
files += get_files_from_folder(folder, extensions, name_filter)
|
||||
return files
|
||||
|
||||
|
||||
def update_all_model_names():
|
||||
global model_filenames, lora_filenames
|
||||
def update_files():
|
||||
global model_filenames, lora_filenames, wildcard_filenames, available_presets
|
||||
model_filenames = get_model_filenames(paths_checkpoints)
|
||||
lora_filenames = get_model_filenames(paths_loras)
|
||||
wildcard_filenames = get_files_from_folder(path_wildcards, ['.txt'])
|
||||
available_presets = get_presets()
|
||||
return
|
||||
|
||||
|
||||
@ -538,6 +612,14 @@ def downloading_sdxl_lcm_lora():
|
||||
)
|
||||
return sdxl_lcm_lora
|
||||
|
||||
def downloading_sdxl_lightning_lora():
|
||||
load_file_from_url(
|
||||
url='https://huggingface.co/ByteDance/SDXL-Lightning/resolve/main/sdxl_lightning_4step_lora.safetensors',
|
||||
model_dir=paths_loras[0],
|
||||
file_name=sdxl_lightning_lora
|
||||
)
|
||||
return sdxl_lightning_lora
|
||||
|
||||
|
||||
def downloading_controlnet_canny():
|
||||
load_file_from_url(
|
||||
@ -604,4 +686,4 @@ def downloading_upscale_model():
|
||||
return os.path.join(path_upscale_models, 'fooocus_upscaler_s409985e5.bin')
|
||||
|
||||
|
||||
update_all_model_names()
|
||||
update_files()
|
||||
|
@ -73,14 +73,14 @@ class StableDiffusionModel:
|
||||
|
||||
loras_to_load = []
|
||||
|
||||
for name, weight in loras:
|
||||
if name == 'None':
|
||||
for filename, weight in loras:
|
||||
if filename == 'None':
|
||||
continue
|
||||
|
||||
if os.path.exists(name):
|
||||
lora_filename = name
|
||||
if os.path.exists(filename):
|
||||
lora_filename = filename
|
||||
else:
|
||||
lora_filename = get_file_from_folder_list(name, modules.config.paths_loras)
|
||||
lora_filename = get_file_from_folder_list(filename, modules.config.paths_loras)
|
||||
|
||||
if not os.path.exists(lora_filename):
|
||||
print(f'Lora file not found: {lora_filename}')
|
||||
|
@ -11,7 +11,7 @@ from extras.expansion import FooocusExpansion
|
||||
|
||||
from ldm_patched.modules.model_base import SDXL, SDXLRefiner
|
||||
from modules.sample_hijack import clip_separate
|
||||
from modules.util import get_file_from_folder_list
|
||||
from modules.util import get_file_from_folder_list, get_enabled_loras
|
||||
|
||||
|
||||
model_base = core.StableDiffusionModel()
|
||||
@ -254,7 +254,7 @@ def refresh_everything(refiner_model_name, base_model_name, loras,
|
||||
refresh_everything(
|
||||
refiner_model_name=modules.config.default_refiner_model_name,
|
||||
base_model_name=modules.config.default_base_model_name,
|
||||
loras=modules.config.default_loras
|
||||
loras=get_enabled_loras(modules.config.default_loras)
|
||||
)
|
||||
|
||||
|
||||
|
@@ -67,7 +67,7 @@ default_parameters = {
    cn_ip: (0.5, 0.6), cn_ip_face: (0.9, 0.75), cn_canny: (0.5, 1.0), cn_cpds: (0.5, 1.0)
} # stop, weight

output_formats = ['png', 'jpg', 'webp']
output_formats = ['png', 'jpeg', 'webp']

inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6']
inpaint_option_default = 'Inpaint or Outpaint (default)'
@@ -89,37 +89,51 @@ metadata_scheme = [
    (f'{MetadataScheme.A1111.value} (plain text)', MetadataScheme.A1111.value),
]

lora_count = 5

controlnet_image_count = 4


class OutputFormat(Enum):
    PNG = 'png'
    JPEG = 'jpeg'
    WEBP = 'webp'

    @classmethod
    def list(cls) -> list:
        return list(map(lambda c: c.value, cls))


class Steps(IntEnum):
    QUALITY = 60
    SPEED = 30
    EXTREME_SPEED = 8
    LIGHTNING = 4


class StepsUOV(IntEnum):
    QUALITY = 36
    SPEED = 18
    EXTREME_SPEED = 8
    LIGHTNING = 4


class Performance(Enum):
    QUALITY = 'Quality'
    SPEED = 'Speed'
    EXTREME_SPEED = 'Extreme Speed'
    LIGHTNING = 'Lightning'

    @classmethod
    def list(cls) -> list:
        return list(map(lambda c: c.value, cls))

    @classmethod
    def has_restricted_features(cls, x) -> bool:
        if isinstance(x, Performance):
            x = x.value
        return x in [cls.EXTREME_SPEED.value, cls.LIGHTNING.value]

    def steps(self) -> int | None:
        return Steps[self.name].value if Steps[self.name] else None

    def steps_uov(self) -> int | None:
        return StepsUOV[self.name].value if Steps[self.name] else None


performance_selections = Performance.list()
|
||||
|
@ -17,7 +17,7 @@ from gradio_client.documentation import document, set_documentation_group
|
||||
from gradio_client.serializing import ImgSerializable
|
||||
from PIL import Image as _Image # using _ to minimize namespace pollution
|
||||
|
||||
from gradio import processing_utils, utils
|
||||
from gradio import processing_utils, utils, Error
|
||||
from gradio.components.base import IOComponent, _Keywords, Block
|
||||
from gradio.deprecation import warn_style_method_deprecation
|
||||
from gradio.events import (
|
||||
@ -275,7 +275,10 @@ class Image(
|
||||
x, mask = x["image"], x["mask"]
|
||||
|
||||
assert isinstance(x, str)
|
||||
im = processing_utils.decode_base64_to_image(x)
|
||||
try:
|
||||
im = processing_utils.decode_base64_to_image(x)
|
||||
except PIL.UnidentifiedImageError:
|
||||
raise Error("Unsupported image type in input")
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("ignore")
|
||||
im = im.convert(self.image_mode)
|
||||
|
modules/html.py
@ -1,136 +1,3 @@
|
||||
css = '''
|
||||
.loader-container {
|
||||
display: flex; /* Use flex to align items horizontally */
|
||||
align-items: center; /* Center items vertically within the container */
|
||||
white-space: nowrap; /* Prevent line breaks within the container */
|
||||
}
|
||||
|
||||
.loader {
|
||||
border: 8px solid #f3f3f3; /* Light grey */
|
||||
border-top: 8px solid #3498db; /* Blue */
|
||||
border-radius: 50%;
|
||||
width: 30px;
|
||||
height: 30px;
|
||||
animation: spin 2s linear infinite;
|
||||
}
|
||||
|
||||
@keyframes spin {
|
||||
0% { transform: rotate(0deg); }
|
||||
100% { transform: rotate(360deg); }
|
||||
}
|
||||
|
||||
/* Style the progress bar */
|
||||
progress {
|
||||
appearance: none; /* Remove default styling */
|
||||
height: 20px; /* Set the height of the progress bar */
|
||||
border-radius: 5px; /* Round the corners of the progress bar */
|
||||
background-color: #f3f3f3; /* Light grey background */
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
/* Style the progress bar container */
|
||||
.progress-container {
|
||||
margin-left: 20px;
|
||||
margin-right: 20px;
|
||||
flex-grow: 1; /* Allow the progress container to take up remaining space */
|
||||
}
|
||||
|
||||
/* Set the color of the progress bar fill */
|
||||
progress::-webkit-progress-value {
|
||||
background-color: #3498db; /* Blue color for the fill */
|
||||
}
|
||||
|
||||
progress::-moz-progress-bar {
|
||||
background-color: #3498db; /* Blue color for the fill in Firefox */
|
||||
}
|
||||
|
||||
/* Style the text on the progress bar */
|
||||
progress::after {
|
||||
content: attr(value '%'); /* Display the progress value followed by '%' */
|
||||
position: absolute;
|
||||
top: 50%;
|
||||
left: 50%;
|
||||
transform: translate(-50%, -50%);
|
||||
color: white; /* Set text color */
|
||||
font-size: 14px; /* Set font size */
|
||||
}
|
||||
|
||||
/* Style other texts */
|
||||
.loader-container > span {
|
||||
margin-left: 5px; /* Add spacing between the progress bar and the text */
|
||||
}
|
||||
|
||||
.progress-bar > .generating {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
.progress-bar{
|
||||
height: 30px !important;
|
||||
}
|
||||
|
||||
.type_row{
|
||||
height: 80px !important;
|
||||
}
|
||||
|
||||
.type_row_half{
|
||||
height: 32px !important;
|
||||
}
|
||||
|
||||
.scroll-hide{
|
||||
resize: none !important;
|
||||
}
|
||||
|
||||
.refresh_button{
|
||||
border: none !important;
|
||||
background: none !important;
|
||||
font-size: none !important;
|
||||
box-shadow: none !important;
|
||||
}
|
||||
|
||||
.advanced_check_row{
|
||||
width: 250px !important;
|
||||
}
|
||||
|
||||
.min_check{
|
||||
min-width: min(1px, 100%) !important;
|
||||
}
|
||||
|
||||
.resizable_area {
|
||||
resize: vertical;
|
||||
overflow: auto !important;
|
||||
}
|
||||
|
||||
.aspect_ratios label {
|
||||
width: 140px !important;
|
||||
}
|
||||
|
||||
.aspect_ratios label span {
|
||||
white-space: nowrap !important;
|
||||
}
|
||||
|
||||
.aspect_ratios label input {
|
||||
margin-left: -5px !important;
|
||||
}
|
||||
|
||||
.lora_enable label {
|
||||
height: 100%;
|
||||
}
|
||||
|
||||
.lora_enable label input {
|
||||
margin: auto;
|
||||
}
|
||||
|
||||
.lora_enable label span {
|
||||
display: none;
|
||||
}
|
||||
|
||||
@-moz-document url-prefix() {
|
||||
.lora_weight input[type=number] {
|
||||
width: 80px;
|
||||
}
|
||||
}
|
||||
|
||||
'''
|
||||
progress_html = '''
|
||||
<div class="loader-container">
|
||||
<div class="loader"></div>
|
||||
|
@@ -1,6 +1,7 @@
import os
import importlib
import importlib.util
import shutil
import subprocess
import sys
import re
@@ -9,9 +10,6 @@ import importlib.metadata
import packaging.version
from packaging.requirements import Requirement


logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR)  # sshh...
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())

@@ -101,3 +99,19 @@ def requirements_met(requirements_file):

    return True


def delete_folder_content(folder, prefix=None):
    result = True

    for filename in os.listdir(folder):
        file_path = os.path.join(folder, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print(f'{prefix}Failed to delete {file_path}. Reason: {e}')
            result = False

    return result
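launch.py calls this helper on startup when temp_path_cleanup_on_launch is enabled. A hedged usage sketch with an assumed temp directory (the default Fooocus temp path is shown for illustration):

import os
import tempfile
from modules.launch_util import delete_folder_content  # assumes the Fooocus tree is importable

temp_dir = os.path.join(tempfile.gettempdir(), 'fooocus')
os.makedirs(temp_dir, exist_ok=True)
if delete_folder_content(temp_dir, '[Cleanup] '):
    print('[Cleanup] Cleanup successful')
else:
    print('[Cleanup] Failed to delete content of temp dir.')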
|
@@ -210,8 +210,7 @@ def parse_meta_from_preset(preset_content):
            height = height[:height.index(" ")]
            preset_prepared[meta_key] = (width, height)
        else:
            preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[
                settings_key] is not None else getattr(modules.config, settings_key)
            preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[settings_key] is not None else getattr(modules.config, settings_key)

        if settings_key == "default_styles" or settings_key == "default_aspect_ratio":
            preset_prepared[meta_key] = str(preset_prepared[meta_key])
|
||||
|
@ -6,8 +6,9 @@ import urllib.parse
|
||||
|
||||
from PIL import Image
|
||||
from PIL.PngImagePlugin import PngInfo
|
||||
from modules.util import generate_temp_filename
|
||||
from modules.flags import OutputFormat
|
||||
from modules.meta_parser import MetadataParser, get_exif
|
||||
from modules.util import generate_temp_filename
|
||||
|
||||
log_cache = {}
|
||||
|
||||
@ -21,7 +22,7 @@ def get_current_html_path(output_format=None):
|
||||
|
||||
|
||||
def log(img, metadata, metadata_parser: MetadataParser | None = None, output_format=None) -> str:
|
||||
path_outputs = args_manager.args.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs
|
||||
path_outputs = modules.config.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs
|
||||
output_format = output_format if output_format else modules.config.default_output_format
|
||||
date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension=output_format)
|
||||
os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True)
|
||||
@ -29,7 +30,7 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for
|
||||
parsed_parameters = metadata_parser.parse_string(metadata.copy()) if metadata_parser is not None else ''
|
||||
image = Image.fromarray(img)
|
||||
|
||||
if output_format == 'png':
|
||||
if output_format == OutputFormat.PNG.value:
|
||||
if parsed_parameters != '':
|
||||
pnginfo = PngInfo()
|
||||
pnginfo.add_text('parameters', parsed_parameters)
|
||||
@ -37,9 +38,9 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for
|
||||
else:
|
||||
pnginfo = None
|
||||
image.save(local_temp_filename, pnginfo=pnginfo)
|
||||
elif output_format == 'jpg':
|
||||
elif output_format == OutputFormat.JPEG.value:
|
||||
image.save(local_temp_filename, quality=95, optimize=True, progressive=True, exif=get_exif(parsed_parameters, metadata_parser.get_scheme().value) if metadata_parser else Image.Exif())
|
||||
elif output_format == 'webp':
|
||||
elif output_format == OutputFormat.WEBP.value:
|
||||
image.save(local_temp_filename, quality=95, lossless=False, exif=get_exif(parsed_parameters, metadata_parser.get_scheme().value) if metadata_parser else Image.Exif())
|
||||
else:
|
||||
image.save(local_temp_filename)
|
||||
|
@@ -2,13 +2,12 @@ import os
import re
import json
import math
import modules.config

from modules.util import get_files_from_folder


# cannot use modules.config - validators causing circular imports
styles_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../sdxl_styles/'))
wildcards_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../wildcards/'))
wildcards_max_bfs_depth = 64

@@ -60,7 +59,7 @@ def apply_style(style, positive):
    return p.replace('{prompt}', positive).splitlines(), n.splitlines()


def apply_wildcards(wildcard_text, rng, directory=wildcards_path):
def apply_wildcards(wildcard_text, rng, i, read_wildcards_in_order):
    for _ in range(wildcards_max_bfs_depth):
        placeholders = re.findall(r'__([\w-]+)__', wildcard_text)
        if len(placeholders) == 0:
@@ -69,10 +68,14 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path):
        print(f'[Wildcards] processing: {wildcard_text}')
        for placeholder in placeholders:
            try:
                words = open(os.path.join(directory, f'{placeholder}.txt'), encoding='utf-8').read().splitlines()
                matches = [x for x in modules.config.wildcard_filenames if os.path.splitext(os.path.basename(x))[0] == placeholder]
                words = open(os.path.join(modules.config.path_wildcards, matches[0]), encoding='utf-8').read().splitlines()
                words = [x for x in words if x != '']
                assert len(words) > 0
                wildcard_text = wildcard_text.replace(f'__{placeholder}__', rng.choice(words), 1)
                if read_wildcards_in_order:
                    wildcard_text = wildcard_text.replace(f'__{placeholder}__', words[i % len(words)], 1)
                else:
                    wildcard_text = wildcard_text.replace(f'__{placeholder}__', rng.choice(words), 1)
            except:
                print(f'[Wildcards] Warning: {placeholder}.txt missing or empty. '
                      f'Using "{placeholder}" as a normal word.')
@@ -82,8 +85,9 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path):
    print(f'[Wildcards] BFS stack overflow. Current text: {wildcard_text}')
    return wildcard_text


def get_words(arrays, totalMult, index):
    if(len(arrays) == 1):
    if len(arrays) == 1:
        return [arrays[0].split(',')[index]]
    else:
        words = arrays[0].split(',')
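To make the new read_wildcards_in_order behaviour concrete, here is the selection rule from apply_wildcards in isolation (values invented; the real function resolves the word list via modules.config.wildcard_filenames):

import random

words = ['red', 'green', 'blue']   # lines of a hypothetical wildcards/color.txt
rng = random.Random(1234)          # seeded per task, as in async_worker
i = 4                              # index of the image within the batch

read_wildcards_in_order = True
choice = words[i % len(words)] if read_wildcards_in_order else rng.choice(words)
print(choice)  # 'green' -- deterministic: image 4 of the batch gets entry 4 % 3 == 1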
|
||||
|
@@ -163,7 +163,7 @@ def generate_temp_filename(folder='./outputs/', extension='png'):
    return date_string, os.path.abspath(result), filename


def get_files_from_folder(folder_path, exensions=None, name_filter=None):
def get_files_from_folder(folder_path, extensions=None, name_filter=None):
    if not os.path.isdir(folder_path):
        raise ValueError("Folder path is not a valid directory.")

@@ -175,7 +175,7 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None):
            relative_path = ""
        for filename in sorted(files, key=lambda s: s.casefold()):
            _, file_extension = os.path.splitext(filename)
            if (exensions is None or file_extension.lower() in exensions) and (name_filter is None or name_filter in _):
            if (extensions is None or file_extension.lower() in extensions) and (name_filter is None or name_filter in _):
                path = os.path.join(relative_path, filename)
                filenames.append(path)

@@ -360,3 +360,7 @@ def makedirs_with_log(path):
        os.makedirs(path, exist_ok=True)
    except OSError as error:
        print(f'Directory {path} could not be created, reason: {error}')


def get_enabled_loras(loras: list) -> list:
    return [[lora[1], lora[2]] for lora in loras if lora[0]]
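get_enabled_loras collapses the new (enabled, name, weight) triples coming from the UI and presets into the [name, weight] pairs the pipeline loads. An illustrative call with made-up entries:

from modules.util import get_enabled_loras  # assumes the Fooocus tree is importable

loras = [
    [True,  'sd_xl_offset_example-lora_1.0.safetensors', 0.1],
    [False, 'some_disabled_lora.safetensors', 1.0],   # unchecked in the UI -> dropped
    [True,  'None', 1.0],                             # placeholder slot, skipped later at load time
]
print(get_enabled_loras(loras))
# [['sd_xl_offset_example-lora_1.0.safetensors', 0.1], ['None', 1.0]]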
|
||||
|
@ -4,22 +4,27 @@
|
||||
"default_refiner_switch": 0.5,
|
||||
"default_loras": [
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
]
|
||||
@ -33,7 +38,7 @@
|
||||
"default_prompt_negative": "",
|
||||
"default_styles": [
|
||||
"Fooocus V2",
|
||||
"Fooocus Negative",
|
||||
"Fooocus Semi Realistic",
|
||||
"Fooocus Masterpiece"
|
||||
],
|
||||
"default_aspect_ratio": "896*1152",
|
||||
|
@ -4,22 +4,27 @@
|
||||
"default_refiner_switch": 0.5,
|
||||
"default_loras": [
|
||||
[
|
||||
true,
|
||||
"sd_xl_offset_example-lora_1.0.safetensors",
|
||||
0.1
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
]
|
||||
|
@ -4,22 +4,27 @@
|
||||
"default_refiner_switch": 0.5,
|
||||
"default_loras": [
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
]
|
||||
|
presets/lightning.json (new file)
@ -0,0 +1,57 @@
|
||||
{
|
||||
"default_model": "juggernautXL_v8Rundiffusion.safetensors",
|
||||
"default_refiner": "None",
|
||||
"default_refiner_switch": 0.5,
|
||||
"default_loras": [
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
]
|
||||
],
|
||||
"default_cfg_scale": 4.0,
|
||||
"default_sample_sharpness": 2.0,
|
||||
"default_sampler": "dpmpp_2m_sde_gpu",
|
||||
"default_scheduler": "karras",
|
||||
"default_performance": "Lightning",
|
||||
"default_prompt": "",
|
||||
"default_prompt_negative": "",
|
||||
"default_styles": [
|
||||
"Fooocus V2",
|
||||
"Fooocus Enhance",
|
||||
"Fooocus Sharp"
|
||||
],
|
||||
"default_aspect_ratio": "1152*896",
|
||||
"checkpoint_downloads": {
|
||||
"juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
|
||||
},
|
||||
"embeddings_downloads": {},
|
||||
"lora_downloads": {},
|
||||
"previous_default_models": [
|
||||
"juggernautXL_version8Rundiffusion.safetensors",
|
||||
"juggernautXL_version7Rundiffusion.safetensors",
|
||||
"juggernautXL_v7Rundiffusion.safetensors",
|
||||
"juggernautXL_version6Rundiffusion.safetensors",
|
||||
"juggernautXL_v6Rundiffusion.safetensors"
|
||||
]
|
||||
}
|
@ -1,25 +1,30 @@
|
||||
{
|
||||
"default_model": "realisticStockPhoto_v20.safetensors",
|
||||
"default_refiner": "",
|
||||
"default_refiner": "None",
|
||||
"default_refiner_switch": 0.5,
|
||||
"default_loras": [
|
||||
[
|
||||
true,
|
||||
"SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors",
|
||||
0.25
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
]
|
||||
|
@ -4,22 +4,27 @@
|
||||
"default_refiner_switch": 0.75,
|
||||
"default_loras": [
|
||||
[
|
||||
true,
|
||||
"sd_xl_offset_example-lora_1.0.safetensors",
|
||||
0.5
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
],
|
||||
[
|
||||
true,
|
||||
"None",
|
||||
1.0
|
||||
]
|
||||
|
readme.md
@@ -84,6 +84,10 @@ The first time you launch the software, it will automatically download models:

After Fooocus 2.1.60, you will also have `run_anime.bat` and `run_realistic.bat`. They are different model presets (and require different models, but they will be automatically downloaded). [Check here for more details](https://github.com/lllyasviel/Fooocus/discussions/679).

After Fooocus 2.3.0 you can also switch presets directly in the browser. Add these arguments if you want to change the default behavior:
* Use `--disable-preset-selection` to disable preset selection in the browser.
* Use `--always-download-new-model` to download missing models when switching presets. By default, Fooocus falls back to the `previous_default_models` defined in the corresponding preset (see the terminal output).



If you already have these files, you can copy them to the above locations to speed up installation.
@@ -115,17 +119,21 @@ See also the common problems and troubleshoots [here](troubleshoot.md).

### Colab

(Last tested - 2023 Dec 12)
(Last tested - 2024 Mar 18 by [mashb1t](https://github.com/mashb1t))

| Colab | Info
| --- | --- |
[](https://colab.research.google.com/github/lllyasviel/Fooocus/blob/main/fooocus_colab.ipynb) | Fooocus Official

In Colab, you can modify the last line to `!python entry_with_update.py --share` or `!python entry_with_update.py --preset anime --share` or `!python entry_with_update.py --preset realistic --share` for Fooocus Default/Anime/Realistic Edition.
In Colab, you can modify the last line to `!python entry_with_update.py --share --always-high-vram` or `!python entry_with_update.py --share --always-high-vram --preset anime` or `!python entry_with_update.py --share --always-high-vram --preset realistic` for Fooocus Default/Anime/Realistic Edition.

You can also change the preset in the UI. Be aware that this may lead to timeouts after 60 seconds. If that happens, wait until the download has finished, switch the preset to `initial` and back to the one you selected, or reload the page.

Note that this Colab will disable refiner by default because Colab free's resources are relatively limited (and some "big" features like image prompt may cause free-tier Colab to disconnect). We make sure that basic text-to-image is always working on free-tier Colab.

Thanks to [camenduru](https://github.com/camenduru)!
Using `--always-high-vram` shifts resource allocation from RAM to VRAM and achieves the overall best balance between performance, flexibility and stability on the default T4 instance. Please find more information [here](https://github.com/lllyasviel/Fooocus/pull/1710#issuecomment-1989185346).

Thanks to [camenduru](https://github.com/camenduru) for the template!

### Linux (Using Anaconda)

sdxl_styles/samples/fooocus_semi_realistic.jpg (new binary file, 8.4 KiB, not shown)
@ -3,6 +3,10 @@
|
||||
"name": "Fooocus Enhance",
|
||||
"negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)"
|
||||
},
|
||||
{
|
||||
"name": "Fooocus Semi Realistic",
|
||||
"negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)"
|
||||
},
|
||||
{
|
||||
"name": "Fooocus Sharp",
|
||||
"prompt": "cinematic still {prompt} . emotional, harmonious, vignette, 4k epic detailed, shot on kodak, 35mm photo, sharp focus, high budget, cinemascope, moody, epic, gorgeous, film grain, grainy",
|
||||
|
@@ -1,3 +1,14 @@
# [2.3.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.3.0)

* Add performance "lightning" (based on [SDXL-Lightning 4 step LoRA](https://huggingface.co/ByteDance/SDXL-Lightning/blob/main/sdxl_lightning_4step_lora.safetensors))
* Add preset selection to UI, disable with argument `--disable-preset-selection`. Use `--always-download-new-model` to download missing models on preset switch.
* Improve face swap consistency by switching later in the process to (synthetic) refiner
* Add temp path cleanup on startup
* Add support for wildcard subdirectories
* Add scrollable 2 column layout for styles for better structure
* Improve Colab resource needs for T4 instances (default), positively tested with all image prompt features
* Improve anime preset, now uses style `Fooocus Semi Realistic` instead of `Fooocus Negative` (less wet look images)

# [2.2.1](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.1)

* Fix some small bugs (e.g. image grid, upscale fast 2x, LoRA weight width in Firefox)
102  webui.py
@ -15,6 +15,7 @@ import modules.style_sorter as style_sorter
import modules.meta_parser
import args_manager
import copy
import launch

from modules.sdxl_styles import legal_style_names
from modules.private_logger import get_current_html_path
@ -28,12 +29,16 @@ def get_task(*args):

return worker.AsyncTask(args=args)

def generate_clicked(task):
def generate_clicked(task: worker.AsyncTask):
import ldm_patched.modules.model_management as model_management

with model_management.interrupt_processing_mutex:
model_management.interrupt_processing = False
# outputs=[progress_html, progress_window, progress_gallery, gallery]

if len(task.args) == 0:
return

execution_start_time = time.perf_counter()
finished = False

@ -91,9 +96,7 @@ title = f'Fooocus {fooocus_version.version}'
if isinstance(args_manager.args.preset, str):
title += ' ' + args_manager.args.preset

shared.gradio_root = gr.Blocks(
title=title,
css=modules.html.css).queue()
shared.gradio_root = gr.Blocks(title=title).queue()

with shared.gradio_root:
currentTask = gr.State(worker.AsyncTask(args=[]))
@ -254,8 +257,13 @@ with shared.gradio_root:

with gr.Column(scale=1, visible=modules.config.default_advanced_checkbox) as advanced_column:
with gr.Tab(label='Setting'):
if not args_manager.args.disable_preset_selection:
preset_selection = gr.Radio(label='Preset',
choices=modules.config.available_presets,
value=args_manager.args.preset if args_manager.args.preset else "initial",
interactive=True)
performance_selection = gr.Radio(label='Performance',
choices=modules.flags.performance_selections,
choices=flags.Performance.list(),
value=modules.config.default_performance)
aspect_ratios_selection = gr.Radio(label='Aspect Ratios', choices=modules.config.available_aspect_ratios,
value=modules.config.default_aspect_ratio, info='width × height',
@ -263,7 +271,7 @@ with shared.gradio_root:
image_number = gr.Slider(label='Image Number', minimum=1, maximum=modules.config.default_max_image_number, step=1, value=modules.config.default_image_number)

output_format = gr.Radio(label='Output Format',
choices=modules.flags.output_formats,
choices=flags.OutputFormat.list(),
value=modules.config.default_output_format)

negative_prompt = gr.Textbox(label='Negative Prompt', show_label=True, placeholder="Type prompt here.",
@ -300,7 +308,7 @@ with shared.gradio_root:
history_link = gr.HTML()
shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False)

with gr.Tab(label='Style'):
with gr.Tab(label='Style', elem_classes=['style_selections_tab']):
style_sorter.try_load_sorted_styles(
style_names=legal_style_names,
default_selected=modules.config.default_styles)
@ -353,20 +361,20 @@ with shared.gradio_root:
with gr.Group():
lora_ctrls = []

for i, (n, v) in enumerate(modules.config.default_loras):
for i, (enabled, filename, weight) in enumerate(modules.config.default_loras):
with gr.Row():
lora_enabled = gr.Checkbox(label='Enable', value=True,
lora_enabled = gr.Checkbox(label='Enable', value=enabled,
elem_classes=['lora_enable', 'min_check'], scale=1)
lora_model = gr.Dropdown(label=f'LoRA {i + 1}',
choices=['None'] + modules.config.lora_filenames, value=n,
choices=['None'] + modules.config.lora_filenames, value=filename,
elem_classes='lora_model', scale=5)
lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight,
maximum=modules.config.default_loras_max_weight, step=0.01, value=v,
maximum=modules.config.default_loras_max_weight, step=0.01, value=weight,
elem_classes='lora_weight', scale=5)
lora_ctrls += [lora_enabled, lora_model, lora_weight]

with gr.Row():
model_refresh = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button')
refresh_files = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button')
with gr.Tab(label='Advanced'):
guidance_scale = gr.Slider(label='Guidance Scale', minimum=1.0, maximum=30.0, step=0.01,
value=modules.config.default_cfg_scale,
@ -428,12 +436,13 @@ with shared.gradio_root:
disable_preview = gr.Checkbox(label='Disable Preview', value=False,
info='Disable preview during generation.')
disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results',
value=modules.config.default_performance == 'Extreme Speed',
interactive=modules.config.default_performance != 'Extreme Speed',
value=modules.config.default_performance == flags.Performance.EXTREME_SPEED.value,
interactive=modules.config.default_performance != flags.Performance.EXTREME_SPEED.value,
info='Disable intermediate results during generation, only show final gallery.')
disable_seed_increment = gr.Checkbox(label='Disable seed increment',
info='Disable automatic seed increment when image number is > 1.',
value=False)
read_wildcards_in_order = gr.Checkbox(label="Read wildcards in order", value=False)

if not args_manager.args.disable_metadata:
save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images,
@ -512,24 +521,60 @@ with shared.gradio_root:
def dev_mode_checked(r):
return gr.update(visible=r)


dev_mode.change(dev_mode_checked, inputs=[dev_mode], outputs=[dev_tools],
queue=False, show_progress=False)

def model_refresh_clicked():
modules.config.update_all_model_names()
def refresh_files_clicked():
modules.config.update_files()
results = [gr.update(choices=modules.config.model_filenames)]
results += [gr.update(choices=['None'] + modules.config.model_filenames)]
if not args_manager.args.disable_preset_selection:
results += [gr.update(choices=modules.config.available_presets)]
for i in range(modules.config.default_max_lora_number):
results += [gr.update(interactive=True), gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
results += [gr.update(interactive=True),
gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
return results

model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls,
refresh_files_output = [base_model, refiner_model]
if not args_manager.args.disable_preset_selection:
refresh_files_output += [preset_selection]
refresh_files.click(refresh_files_clicked, [], refresh_files_output + lora_ctrls,
queue=False, show_progress=False)

performance_selection.change(lambda x: [gr.update(interactive=x != 'Extreme Speed')] * 11 +
[gr.update(visible=x != 'Extreme Speed')] * 1 +
[gr.update(interactive=x != 'Extreme Speed', value=x == 'Extreme Speed', )] * 1,
state_is_generating = gr.State(False)

load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections,
performance_selection, overwrite_step, overwrite_switch, aspect_ratios_selection,
overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive,
adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, base_model,
refiner_model, refiner_switch, sampler_name, scheduler_name, seed_random, image_seed,
generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls

if not args_manager.args.disable_preset_selection:
def preset_selection_change(preset, is_generating):
preset_content = modules.config.try_get_preset_content(preset) if preset != 'initial' else {}
preset_prepared = modules.meta_parser.parse_meta_from_preset(preset_content)

default_model = preset_prepared.get('base_model')
previous_default_models = preset_prepared.get('previous_default_models', [])
checkpoint_downloads = preset_prepared.get('checkpoint_downloads', {})
embeddings_downloads = preset_prepared.get('embeddings_downloads', {})
lora_downloads = preset_prepared.get('lora_downloads', {})

preset_prepared['base_model'], preset_prepared['lora_downloads'] = launch.download_models(
default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads)

if 'prompt' in preset_prepared and preset_prepared.get('prompt') == '':
del preset_prepared['prompt']

return modules.meta_parser.load_parameter_button_click(json.dumps(preset_prepared), is_generating)

preset_selection.change(preset_selection_change, inputs=[preset_selection, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
.then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \

performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 +
[gr.update(visible=not flags.Performance.has_restricted_features(x))] * 1 +
[gr.update(interactive=not flags.Performance.has_restricted_features(x), value=flags.Performance.has_restricted_features(x))] * 1,
inputs=performance_selection,
outputs=[
guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive,
@ -579,7 +624,8 @@ with shared.gradio_root:
ctrls = [currentTask, generate_image_grid]
ctrls += [
prompt, negative_prompt, style_selections,
performance_selection, aspect_ratios_selection, image_number, output_format, image_seed, sharpness, guidance_scale
performance_selection, aspect_ratios_selection, image_number, output_format, image_seed,
read_wildcards_in_order, sharpness, guidance_scale
]

ctrls += [base_model, refiner_model, refiner_switch] + lora_ctrls
@ -601,8 +647,6 @@ with shared.gradio_root:

ctrls += ip_ctrls

state_is_generating = gr.State(False)

def parse_meta(raw_prompt_txt, is_generating):
loaded_json = None
if is_json(raw_prompt_txt):
@ -618,13 +662,6 @@ with shared.gradio_root:

prompt.input(parse_meta, inputs=[prompt, state_is_generating], outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False)

load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections,
performance_selection, overwrite_step, overwrite_switch, aspect_ratios_selection,
overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive,
adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, base_model,
refiner_model, refiner_switch, sampler_name, scheduler_name, seed_random, image_seed,
generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls

load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False)

def trigger_metadata_import(filepath, state_is_generating):
@ -638,7 +675,6 @@ with shared.gradio_root:

return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating)


metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
.then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False)