support embeddings (#675)
Commit 2f6843da95 (parent b3c6624e82)
@@ -1 +1 @@
-version = '2.1.58'
+version = '2.1.59'
@@ -24,6 +24,7 @@ from fcbh_extras.nodes_freelunch import FreeU
 from fcbh.sample import prepare_mask
 from modules.patch import patched_sampler_cfg_function, patched_model_function_wrapper
 from fcbh.lora import model_lora_keys_unet, model_lora_keys_clip, load_lora
+from modules.path import embeddings_path


 opEmptyLatentImage = EmptyLatentImage()
@@ -66,7 +67,7 @@ def apply_controlnet(positive, negative, control_net, image, strength, start_per
 @torch.no_grad()
 @torch.inference_mode()
 def load_model(ckpt_filename):
-    unet, clip, vae, clip_vision = load_checkpoint_guess_config(ckpt_filename)
+    unet, clip, vae, clip_vision = load_checkpoint_guess_config(ckpt_filename, embedding_directory=embeddings_path)
     unet.model_options['sampler_cfg_function'] = patched_sampler_cfg_function
     unet.model_options['model_function_wrapper'] = patched_model_function_wrapper
     return StableDiffusionModel(unet=unet, clip=clip, vae=vae, clip_vision=clip_vision)
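Note: a hedged usage sketch, not part of this commit. With embedding_directory wired into load_checkpoint_guess_config, textual-inversion files dropped into ../models/embeddings/ should become resolvable from prompt text via the fcbh/ComfyUI "embedding:" syntax. The checkpoint name, embedding file name, and the clip.tokenize/encode_from_tokens calls below are assumptions about the backend, not code from this diff.

    # assumed context: the load_model() shown in the hunk above, with the checkpoint present locally
    model = load_model('sd_xl_base_1.0_0.9vae.safetensors')

    # an embedding saved as ../models/embeddings/bad_hands.pt (hypothetical file) would then be
    # referenced directly from the prompt text
    tokens = model.clip.tokenize('embedding:bad_hands, low quality, cropped')
    negative_cond = model.clip.encode_from_tokens(tokens)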
@@ -49,6 +49,7 @@ def get_dir_or_set_default(key, default_value):

 modelfile_path = get_dir_or_set_default('modelfile_path', '../models/checkpoints/')
 lorafile_path = get_dir_or_set_default('lorafile_path', '../models/loras/')
+embeddings_path = get_dir_or_set_default('embeddings_path', '../models/embeddings/')
 vae_approx_path = get_dir_or_set_default('vae_approx_path', '../models/vae_approx/')
 upscale_models_path = get_dir_or_set_default('upscale_models_path', '../models/upscale_models/')
 inpaint_models_path = get_dir_or_set_default('inpaint_models_path', '../models/inpaint/')
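Note: the helper the new line relies on is not shown in this diff. Below is a hypothetical sketch of its behavior, for context only; the repository's real implementation reads and persists a user path config and may differ.

    import os

    config = {}  # assumed to be loaded from the user's path-config file elsewhere in the module

    def get_dir_or_set_default(key, default_value):
        value = config.get(key)
        if not (isinstance(value, str) and os.path.isdir(value)):
            # fall back to the bundled default, resolved relative to this module, and create it
            value = os.path.abspath(os.path.join(os.path.dirname(__file__), default_value))
            os.makedirs(value, exist_ok=True)
            config[key] = value
        return value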
presets/anime.json (new file, 20 lines)
@@ -0,0 +1,20 @@
+{
+    "default_model": "sd_xl_base_1.0_0.9vae.safetensors",
+    "default_refiner": "sd_xl_refiner_1.0_0.9vae.safetensors",
+    "default_lora": "sd_xl_offset_example-lora_1.0.safetensors",
+    "default_lora_weight": 0.5,
+    "default_cfg_scale": 7.0,
+    "default_sampler": "dpmpp_2m_sde_gpu",
+    "default_scheduler": "karras",
+    "default_styles": [
+        "Fooocus V2",
+        "Default (Slightly Cinematic)"
+    ],
+    "default_negative_prompt": "low quality, bad hands, bad eyes, cropped, missing fingers, extra digit",
+    "default_positive_prompt": "",
+    "checkpoint_downloads": {
+        "sd_xl_base_1.0_0.9vae.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0_0.9vae.safetensors",
+        "sd_xl_refiner_1.0_0.9vae.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0_0.9vae.safetensors"
+    },
+    "default_aspect_ratio": "1152*896"
+}
presets/realistic.json (new file, 20 lines)
@@ -0,0 +1,20 @@
+{
+    "default_model": "sd_xl_base_1.0_0.9vae.safetensors",
+    "default_refiner": "sd_xl_refiner_1.0_0.9vae.safetensors",
+    "default_lora": "sd_xl_offset_example-lora_1.0.safetensors",
+    "default_lora_weight": 0.5,
+    "default_cfg_scale": 7.0,
+    "default_sampler": "dpmpp_2m_sde_gpu",
+    "default_scheduler": "karras",
+    "default_styles": [
+        "Fooocus V2",
+        "Default (Slightly Cinematic)"
+    ],
+    "default_negative_prompt": "low quality, bad hands, bad eyes, cropped, missing fingers, extra digit",
+    "default_positive_prompt": "",
+    "checkpoint_downloads": {
+        "sd_xl_base_1.0_0.9vae.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0_0.9vae.safetensors",
+        "sd_xl_refiner_1.0_0.9vae.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0_0.9vae.safetensors"
+    },
+    "default_aspect_ratio": "1152*896"
+}
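Note: a hedged sketch of how a preset file like the two above could be consumed; the loader name and location are assumptions, not code from this commit.

    import json

    def load_preset(path='presets/anime.json'):
        # read the preset and return its key/value defaults
        with open(path, encoding='utf-8') as f:
            return json.load(f)

    preset = load_preset()
    default_model = preset['default_model']                                # 'sd_xl_base_1.0_0.9vae.safetensors'
    width, height = map(int, preset['default_aspect_ratio'].split('*'))    # 1152, 896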