Fooocus/modules/meta_parser.py
MindOfMatter 18f9f7dc31
feat: make lora number editable in config (#2215)
* Initial commit

* Update README.md

* sync with original main Fooocus repo

* update with my gitignore setup

* add max lora config feature

* Revert "add max lora config feature"

This reverts commit cfe7463fe2.

* add max loras config feature

* Update README.md

* Update .gitignore

* update

* merge

* revert

* refactor: rename default_loras_max_number to default_max_lora_number, validate config for int

* fix: add missing patch_all call and imports again

---------

Co-authored-by: Manuel Schmid <manuel.schmid@odt.net>
2024-02-25 21:12:26 +01:00

151 lines
3.9 KiB
Python

import ast
import json

import gradio as gr

import modules.config
def load_parameter_button_click(raw_prompt_txt, is_generating):
    """Restore the UI widget state from an image's embedded metadata JSON.

    Parses ``raw_prompt_txt`` and builds the flat list of values /
    ``gr.update()`` objects that gradio applies to the parameter widgets,
    in the fixed positional order the UI expects.  Every field that is
    missing or malformed yields ``gr.update()`` placeholders, i.e.
    "leave that widget unchanged".

    Args:
        raw_prompt_txt: JSON string; must decode to a dict of metadata
            fields ('Prompt', 'Styles', 'Resolution', ...).
        is_generating: True while a generation is in flight; controls the
            visibility updates appended for the action buttons.

    Returns:
        list: ``[True, 1]`` followed by one entry (or fixed-size group)
        per widget, ending with three entries per LoRA slot.

    Raises:
        json.JSONDecodeError: if ``raw_prompt_txt`` is not valid JSON.
        AssertionError: if the decoded JSON is not a dict.
    """
    loaded_parameter_dict = json.loads(raw_prompt_txt)
    assert isinstance(loaded_parameter_dict, dict)

    results = [True, 1]

    _append_str(results, loaded_parameter_dict, 'Prompt')
    _append_str(results, loaded_parameter_dict, 'Negative Prompt')

    # 'Styles' is stored as the repr of a list of style names.
    # literal_eval (not eval) — metadata comes from untrusted image files.
    try:
        styles = ast.literal_eval(loaded_parameter_dict.get('Styles', None))
        if not isinstance(styles, list):
            raise ValueError('Styles must be a list')
        results.append(styles)
    except Exception:
        results.append(gr.update())

    _append_str(results, loaded_parameter_dict, 'Performance')

    # 'Resolution' is stored as the repr of a (width, height) tuple.
    try:
        width, height = ast.literal_eval(loaded_parameter_dict.get('Resolution', None))
        formatted = modules.config.add_ratio(f'{width}*{height}')
        if formatted in modules.config.available_aspect_ratios:
            # Known preset: select it; -1 leaves the custom w/h fields alone.
            results += [formatted, -1, -1]
        else:
            # Custom resolution: keep the preset selector unchanged, set w/h.
            results += [gr.update(), width, height]
    except Exception:
        results += [gr.update(), gr.update(), gr.update()]

    _append_float(results, loaded_parameter_dict, 'Sharpness')
    _append_float(results, loaded_parameter_dict, 'Guidance Scale')

    # 'ADM Guidance' is stored as the repr of a (positive, negative, end) tuple.
    try:
        p, n, e = ast.literal_eval(loaded_parameter_dict.get('ADM Guidance', None))
        results += [float(p), float(n), float(e)]
    except Exception:
        results += [gr.update(), gr.update(), gr.update()]

    _append_str(results, loaded_parameter_dict, 'Base Model')
    _append_str(results, loaded_parameter_dict, 'Refiner Model')
    _append_float(results, loaded_parameter_dict, 'Refiner Switch')
    _append_str(results, loaded_parameter_dict, 'Sampler')
    _append_str(results, loaded_parameter_dict, 'Scheduler')

    try:
        seed = int(loaded_parameter_dict.get('Seed', None))
        # False unticks the "random seed" box so the restored seed is used.
        results += [False, seed]
    except Exception:
        results += [gr.update(), gr.update()]

    # Button visibility: while generating, leave the buttons untouched;
    # otherwise show the generate button and hide the load/skip one.
    if is_generating:
        results.append(gr.update())
    else:
        results.append(gr.update(visible=True))
        results.append(gr.update(visible=False))

    # One (enabled, name, weight) triple per configurable LoRA slot.
    for i in range(1, modules.config.default_max_lora_number + 1):
        try:
            name, weight = loaded_parameter_dict.get(f'LoRA {i}', ' : ').split(' : ')
            results += [True, name, float(weight)]
        except Exception:
            # Slot absent or malformed: reset it to an enabled empty slot.
            results += [True, 'None', 1.0]
    return results


def _append_str(results, source, key):
    """Append source[key] to results if it is a str, else a no-op gr.update()."""
    value = source.get(key, None)
    if isinstance(value, str):
        results.append(value)
    else:
        results.append(gr.update())


def _append_float(results, source, key):
    """Append float(source[key]) to results, or gr.update() when missing/invalid."""
    try:
        results.append(float(source[key]))
    except Exception:
        results.append(gr.update())