update dep, sampler (#217)

parent d2e7d940ce
commit 084f1dfb72
@@ -1 +1 @@
-version = '1.0.37'
+version = '1.0.38'

@@ -20,7 +20,7 @@ def prepare_environment():
     xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')

     comfy_repo = os.environ.get('COMFY_REPO', "https://github.com/comfyanonymous/ComfyUI")
-    comfy_commit_hash = os.environ.get('COMFY_COMMIT_HASH', "2bc12d3d22efb5c63ae3a7fc342bb2dd16b31735")
+    comfy_commit_hash = os.environ.get('COMFY_COMMIT_HASH', "c9b562aed153cb35d4ce4126caf86995b0c63b12")

     print(f"Python {sys.version}")
     print(f"Fooocus version: {fooocus_version.version}")

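The backend pin above stays overridable, since prepare_environment() reads each setting with os.environ.get(). A minimal sketch of forcing specific versions before launch; the standalone override snippet is hypothetical, but the variable names and values come straight from the hunk:

```python
import os

# Hypothetical pre-launch overrides (not part of this commit). prepare_environment()
# reads each setting with os.environ.get(NAME, default), so values set here take
# precedence over the new defaults.
os.environ["COMFY_REPO"] = "https://github.com/comfyanonymous/ComfyUI"
os.environ["COMFY_COMMIT_HASH"] = "c9b562aed153cb35d4ce4126caf86995b0c63b12"  # new pin from this commit
os.environ["XFORMERS_PACKAGE"] = "xformers==0.0.20"
```
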
@@ -9,7 +9,7 @@ import comfy.utils

 from comfy.sd import load_checkpoint_guess_config
 from nodes import VAEDecode, EmptyLatentImage, CLIPTextEncode
-from comfy.sample import prepare_mask, broadcast_cond, load_additional_models, cleanup_additional_models
+from comfy.sample import prepare_mask, broadcast_cond, get_additional_models, cleanup_additional_models
 from modules.samplers_advanced import KSampler, KSamplerWithRefiner
 from modules.patch import patch_all

@@ -92,7 +92,7 @@ def get_previewer(device, latent_format):


 @torch.no_grad()
-def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sampler_name='dpmpp_2m_sde_gpu',
+def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sampler_name='uni_pc',
              scheduler='karras', denoise=1.0, disable_noise=False, start_step=None, last_step=None,
              force_full_denoise=False, callback_function=None):
     # SCHEDULERS = ["normal", "karras", "exponential", "simple", "ddim_uniform"]

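The default sampler for ksampler() moves from dpmpp_2m_sde_gpu to uni_pc. A hedged usage sketch (these wrapper functions are not part of the commit; ksampler refers to the function defined above, and the caller is assumed to supply the loaded model, conditioning, and latent):

```python
# Hypothetical wrappers illustrating the new default. Not part of this commit.
def sample_with_new_default(model, positive, negative, latent, seed):
    # sampler_name is omitted, so this now runs UniPC ('uni_pc') with the
    # unchanged 'karras' scheduler.
    return ksampler(model, positive, negative, latent, seed=seed, steps=30, cfg=7.0)

def sample_like_1_0_37(model, positive, negative, latent, seed):
    # The previous default remains selectable by passing it explicitly.
    return ksampler(model, positive, negative, latent, seed=seed, steps=30, cfg=7.0,
                    sampler_name='dpmpp_2m_sde_gpu')
```
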
@@ -133,7 +133,6 @@ def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sa
     if noise_mask is not None:
         noise_mask = prepare_mask(noise_mask, noise.shape, device)

-    comfy.model_management.load_model_gpu(model)
     real_model = model.model

     noise = noise.to(device)

@@ -142,7 +141,9 @@ def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sa
     positive_copy = broadcast_cond(positive, noise.shape[0], device)
     negative_copy = broadcast_cond(negative, noise.shape[0], device)

-    models = load_additional_models(positive, negative, model.model_dtype())
+    models = get_additional_models(positive, negative)
+    comfy.model_management.load_models_gpu([model] + models, comfy.model_management.batch_area_memory(
+        noise.shape[0] * noise.shape[2] * noise.shape[3]))

     sampler = KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler,
                        denoise=denoise, model_options=model.model_options)

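With the updated ComfyUI pin, the old in-helper loading (load_additional_models() plus the earlier load_model_gpu(model) line) is replaced by get_additional_models() and an explicit load_models_gpu() call whose memory budget comes from batch_area_memory() of the noise batch. A minimal sketch of the migrated pattern; the helper function itself is hypothetical, while every comfy call inside it appears verbatim in this hunk or in the import hunk above:

```python
import comfy.model_management
from comfy.sample import get_additional_models, cleanup_additional_models


def load_models_for_sampling(model, positive, negative, noise):
    # Hypothetical helper (not part of this commit). Collect the additional
    # models referenced by the conditioning, then load them together with the
    # main model, reserving memory proportional to the noise batch area.
    models = get_additional_models(positive, negative)
    comfy.model_management.load_models_gpu(
        [model] + models,
        comfy.model_management.batch_area_memory(
            noise.shape[0] * noise.shape[2] * noise.shape[3]))
    return models

# After sampling finishes, cleanup_additional_models(models), imported above,
# is assumed to be the matching teardown; the call itself is not shown in this hunk.
```
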
@@ -164,7 +165,7 @@ def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sa

 @torch.no_grad()
 def ksampler_with_refiner(model, positive, negative, refiner, refiner_positive, refiner_negative, latent,
-                          seed=None, steps=30, refiner_switch_step=20, cfg=7.0, sampler_name='dpmpp_2m_sde_gpu',
+                          seed=None, steps=30, refiner_switch_step=20, cfg=7.0, sampler_name='uni_pc',
                           scheduler='karras', denoise=1.0, disable_noise=False, start_step=None, last_step=None,
                           force_full_denoise=False, callback_function=None):
     # SCHEDULERS = ["normal", "karras", "exponential", "simple", "ddim_uniform"]

@@ -205,8 +206,6 @@ def ksampler_with_refiner(model, positive, negative, refiner, refiner_positive,
     if noise_mask is not None:
         noise_mask = prepare_mask(noise_mask, noise.shape, device)

-    comfy.model_management.load_model_gpu(model)
-
     noise = noise.to(device)
     latent_image = latent_image.to(device)

@@ -216,7 +215,9 @@ def ksampler_with_refiner(model, positive, negative, refiner, refiner_positive,
     refiner_positive_copy = broadcast_cond(refiner_positive, noise.shape[0], device)
     refiner_negative_copy = broadcast_cond(refiner_negative, noise.shape[0], device)

-    models = load_additional_models(positive, negative, model.model_dtype())
+    models = get_additional_models(positive, negative)
+    comfy.model_management.load_models_gpu([model] + models, comfy.model_management.batch_area_memory(
+        noise.shape[0] * noise.shape[2] * noise.shape[3]))

     sampler = KSamplerWithRefiner(model=model, refiner_model=refiner, steps=steps, device=device,
                                   sampler=sampler_name, scheduler=scheduler,

@@ -103,7 +103,7 @@ Note that some of these tricks are currently (2023 Aug 11) impossible to reprodu
 6. The parameters of the samplers are carefully tuned.
 7. Because XL uses positional encoding for the generation resolution, images generated at several fixed resolutions look a bit better than those from arbitrary resolutions (the positional encoding is not very good at handling integer values unseen during training). This suggests that the resolutions in the UI may be hard-coded for best results.
 8. Separate prompts for the two text encoders seem unnecessary. Separate prompts for the base model and refiner may work, but the effects are random, and we refrain from implementing this.
-9. The DPM family seems well-suited for XL, since XL sometimes generates overly smooth textures while the DPM family sometimes generates overly dense texture detail. Their joint effect looks neutral and appealing to human perception.
+9. The DPM family (or UniPC) seems well-suited for XL, since XL sometimes generates overly smooth textures while the DPM family sometimes generates overly dense texture detail. Their joint effect looks neutral and appealing to human perception. (Update 2023 Aug 19: changed to UniPC.)

 ## Advanced Features

@@ -1,3 +1,7 @@
+### 1.0.38
+
+* Update dependency, update to sampler.
+
 ### 1.0.37

 * Cinematic-default v2.