add max lora config feature

MindOfMatter 2024-01-25 15:02:43 -05:00
parent f7af74a16d
commit cfe7463fe2
4 changed files with 24 additions and 12 deletions


@@ -131,7 +131,7 @@ def worker():
     base_model_name = args.pop()
     refiner_model_name = args.pop()
     refiner_switch = args.pop()
-    loras = [[str(args.pop()), float(args.pop())] for _ in range(5)]
+    loras = [[str(args.pop()), float(args.pop())] for _ in range(modules.config.default_loras_max_number)]
     input_image_checkbox = args.pop()
     current_tab = args.pop()
     uov_method = args.pop()
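A note on why the hard-coded 5 has to go: the Gradio handler flattens every control value into one args list, and the worker pops them back off, two entries (model name, weight) per LoRA slot, so the pop count must match however many LoRA rows the UI built. A minimal sketch of that invariant, with a hypothetical slot count and file names (not Fooocus code):

# Sketch of the args contract: the UI appends two entries per LoRA slot,
# so the worker must pop exactly 2 * default_loras_max_number LoRA values.
default_loras_max_number = 3  # assumed config value

args = []
for i in range(default_loras_max_number):
    args += [f'lora_{i}.safetensors', 0.8]  # model name, weight per slot
args.reverse()  # the worker consumes args with pop(), i.e. back to front

loras = [[str(args.pop()), float(args.pop())]
         for _ in range(default_loras_max_number)]
print(loras)  # [['lora_0.safetensors', 0.8], ['lora_1.safetensors', 0.8], ...]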


@@ -192,6 +192,11 @@ default_loras = get_config_item_or_set_default(
     ],
     validator=lambda x: isinstance(x, list) and all(len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number) for y in x)
 )
+default_loras_max_number = get_config_item_or_set_default(
+    key='default_loras_max_number',
+    default_value=len(default_loras),
+    validator=lambda x: isinstance(x, numbers.Number) and x >= 1
+)
 default_cfg_scale = get_config_item_or_set_default(
     key='default_cfg_scale',
     default_value=4.0,
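For readers unfamiliar with the helper: get_config_item_or_set_default reads a key from the user's config, falling back to (and writing back) the default when the key is absent or fails the validator. A simplified sketch of that assumed behavior, showing how a user could raise the slot count from their config (the helper body and values below are illustrative, not the actual Fooocus implementation):

import numbers

config_dict = {}  # normally loaded from the user's JSON config file

# Assumed semantics of the helper, simplified:
def get_config_item_or_set_default(key, default_value, validator):
    value = config_dict.get(key)
    if value is not None and validator(value):
        return value
    config_dict[key] = default_value  # persist the fallback
    return default_value

# With this commit, the config can override the slot count, e.g.:
config_dict['default_loras_max_number'] = 8
n = get_config_item_or_set_default(
    key='default_loras_max_number',
    default_value=5,
    validator=lambda x: isinstance(x, numbers.Number) and x >= 1)
print(n)  # 8

Note that the committed validator only requires a Number >= 1, so a float such as 4.5 would pass; checking isinstance(x, int) would be stricter.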
@@ -318,13 +323,14 @@ example_inpaint_prompts = get_config_item_or_set_default(
 example_inpaint_prompts = [[x] for x in example_inpaint_prompts]
-config_dict["default_loras"] = default_loras = default_loras[:5] + [['None', 1.0] for _ in range(5 - len(default_loras))]
+config_dict["default_loras"] = default_loras = default_loras[:default_loras_max_number] + [['None', 1.0] for _ in range(default_loras_max_number - len(default_loras))]
 possible_preset_keys = [
     "default_model",
     "default_refiner",
     "default_refiner_switch",
     "default_loras",
+    "default_loras_max_number",
     "default_cfg_scale",
     "default_sample_sharpness",
     "default_sampler",


@@ -135,14 +135,20 @@ def load_parameter_button_click(raw_prompt_txt, is_generating):
     results.append(gr.update(visible=False))
-    for i in range(1, 6):
-        try:
-            n, w = loaded_parameter_dict.get(f'LoRA {i}').split(' : ')
-            w = float(w)
-            results.append(n)
-            results.append(w)
-        except:
-            results.append(gr.update())
-            results.append(gr.update())
+    for i in range(1, modules.config.default_loras_max_number + 1):
+        lora_key = f'LoRA {i}'
+        if lora_key in loaded_parameter_dict:
+            try:
+                n, w = loaded_parameter_dict[lora_key].split(' : ')
+                w = float(w)
+                results.append(n)  # update LoRA model
+                results.append(w)  # update LoRA weight
+            except Exception as e:
+                # If there's an error parsing, log it or handle it as needed
+                print(f"Error parsing {lora_key}: {e}")
+                results.extend([gr.update(), gr.update()])  # keep existing settings unchanged
+        else:
+            # If the LoRA setting is not in the JSON, keep the existing settings unchanged
+            results.extend([gr.update(), gr.update()])
     return results
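The metadata values parsed here are, judging by the split(' : ') call, 'name : weight' strings keyed by slot. An illustrative parse with a hypothetical entry:

# Illustrative parse of one LoRA metadata entry (format inferred from the
# split(' : ') call above; key and value are hypothetical):
loaded_parameter_dict = {'LoRA 1': 'my_lora.safetensors : 0.8'}

n, w = loaded_parameter_dict['LoRA 1'].split(' : ')
print(n, float(w))  # my_lora.safetensors 0.8

Replacing the bare except around dict.get() with an explicit membership test means a missing slot no longer travels through exception handling, and genuine parse errors are now logged rather than silently swallowed.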


@@ -466,7 +466,7 @@ with shared.gradio_root:
     modules.config.update_all_model_names()
     results = []
     results += [gr.update(choices=modules.config.model_filenames), gr.update(choices=['None'] + modules.config.model_filenames)]
-    for i in range(5):
+    for i in range(modules.config.default_loras_max_number):
         results += [gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
     return results
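Finally, a refresh callback must return exactly one update per registered output component. Assuming the usual layout (two checkpoint dropdowns first, then a model dropdown and a weight control per LoRA row), this loop also has to run default_loras_max_number times or Gradio's output count no longer matches. A standalone count check under those assumptions:

# Sketch (assumed UI layout: 2 checkpoint dropdowns, then 2 components per
# LoRA row). The loop bound and the number of rows the UI built must agree.
def expected_refresh_outputs(default_loras_max_number: int) -> int:
    return 2 + 2 * default_loras_max_number

print(expected_refresh_outputs(5))  # 12
print(expected_refresh_outputs(8))  # 18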