commit 8e08e3612f
parent 93fdeb6345
@@ -2,13 +2,17 @@ import random
 import torch
 import numpy as np
 
+import comfy.model_management
+import comfy.sample
+import comfy.utils
+import latent_preview
+
 from comfy.sd import load_checkpoint_guess_config
-from nodes import VAEDecode, KSamplerAdvanced, EmptyLatentImage, CLIPTextEncode
+from nodes import VAEDecode, EmptyLatentImage, CLIPTextEncode, common_ksampler
 
 
 opCLIPTextEncode = CLIPTextEncode()
 opEmptyLatentImage = EmptyLatentImage()
-opKSamplerAdvanced = KSamplerAdvanced()
 opVAEDecode = VAEDecode()
 
 
@@ -42,24 +46,42 @@ def decode_vae(vae, latent_image):
 
 
 @torch.no_grad()
-def ksample(unet, positive_condition, negative_condition, latent_image, add_noise=True, noise_seed=None, steps=25, cfg=9,
-            sampler_name='euler_ancestral', scheduler='normal', start_at_step=None, end_at_step=None,
-            return_with_leftover_noise=False):
-    return opKSamplerAdvanced.sample(
-        add_noise='enable' if add_noise else 'disable',
-        noise_seed=noise_seed if isinstance(noise_seed, int) else random.randint(1, 2 ** 64),
-        steps=steps,
-        cfg=cfg,
-        sampler_name=sampler_name,
-        scheduler=scheduler,
-        start_at_step=0 if start_at_step is None else start_at_step,
-        end_at_step=steps if end_at_step is None else end_at_step,
-        return_with_leftover_noise='enable' if return_with_leftover_noise else 'disable',
-        model=unet,
-        positive=positive_condition,
-        negative=negative_condition,
-        latent_image=latent_image,
-    )[0]
+def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=9.0, sampler_name='euler_ancestral', scheduler='normal', denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
+    seed = seed if isinstance(seed, int) else random.randint(1, 2 ** 64)
+
+    device = comfy.model_management.get_torch_device()
+    latent_image = latent["samples"]
+
+    if disable_noise:
+        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
+    else:
+        batch_inds = latent["batch_index"] if "batch_index" in latent else None
+        noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)
+
+    noise_mask = None
+    if "noise_mask" in latent:
+        noise_mask = latent["noise_mask"]
+
+    preview_format = "JPEG"
+    if preview_format not in ["JPEG", "PNG"]:
+        preview_format = "JPEG"
+
+    previewer = latent_preview.get_previewer(device, model.model.latent_format)
+
+    pbar = comfy.utils.ProgressBar(steps)
+
+    def callback(step, x0, x, total_steps):
+        preview_bytes = None
+        if previewer:
+            preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
+        pbar.update_absolute(step + 1, total_steps, preview_bytes)
+
+    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
+                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
+                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, seed=seed)
+    out = latent.copy()
+    out["samples"] = samples
+    return (out, )
 
 
 @torch.no_grad()
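
The rewritten ksampler above mirrors ComfyUI's common_ksampler: it prepares the noise tensor itself (zeros when disable_noise=True, otherwise comfy.sample.prepare_noise seeded per batch index), wires a latent_preview previewer into a comfy.utils.ProgressBar callback, and delegates the denoising loop to comfy.sample.sample. A minimal single-pass call might look like the sketch below; this is illustrative only, ksampler is assumed importable from the module changed in this hunk, and unet, positive, negative and latent stand in for a loaded model, two conditioning lists, and an EmptyLatentImage-style {"samples": ...} dict.

    def sample_full(unet, positive, negative, latent, seed=1234):
        # Single full-denoise pass using the new signature's defaults:
        # start_step/last_step stay None, so the whole 30-step schedule runs,
        # and fresh noise is drawn from `seed` via comfy.sample.prepare_noise.
        return ksampler(
            model=unet,
            positive=positive,
            negative=negative,
            latent=latent,
            seed=seed,                       # random 64-bit seed when omitted
            steps=30,
            cfg=9.0,
            sampler_name='euler_ancestral',
            scheduler='normal',
            denoise=1.0,
        )
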
@@ -23,20 +23,20 @@ def process(positive_prompt, negative_prompt, width=1024, height=1024, batch_siz
 
     empty_latent = core.generate_empty_latent(width=width, height=height, batch_size=batch_size)
 
-    sampled_latent = core.ksample(
-        unet=xl_base.unet,
-        positive_condition=positive_conditions,
-        negative_condition=negative_conditions,
-        latent_image=empty_latent,
-        steps=30, start_at_step=0, end_at_step=20, return_with_leftover_noise=True, add_noise=True
+    sampled_latent = core.ksampler(
+        model=xl_base.unet,
+        positive=positive_conditions,
+        negative=negative_conditions,
+        latent=empty_latent,
+        steps=30, start_step=0, last_step=20, disable_noise=False, force_full_denoise=False
     )
 
-    sampled_latent = core.ksample(
-        unet=xl_refiner.unet,
-        positive_condition=positive_conditions_refiner,
-        negative_condition=negative_conditions_refiner,
-        latent_image=sampled_latent,
-        steps=30, start_at_step=20, end_at_step=30, return_with_leftover_noise=False, add_noise=False
+    sampled_latent = core.ksampler(
+        model=xl_refiner.unet,
+        positive=positive_conditions_refiner,
+        negative=negative_conditions_refiner,
+        latent=sampled_latent,
+        steps=30, start_step=20, last_step=30, disable_noise=True, force_full_denoise=True
     )
 
     decoded_latent = core.decode_vae(vae=xl_refiner.vae, latent_image=sampled_latent)
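
The two core.ksampler calls in the pipeline hunk above encode the base-then-refine split: the base pass stops at step 20 of 30 with force_full_denoise=False, so the latent keeps its leftover noise, and the refiner pass resumes at step 20 with disable_noise=True, so no fresh noise is added before denoising to completion. A hedged sketch of the same handoff as a pair of helpers follows; the function names and the modules.core import path are hypothetical, while the parameter mapping (add_noise -> not disable_noise, start_at_step/end_at_step -> start_step/last_step, return_with_leftover_noise -> not force_full_denoise) is taken from the hunks in this commit.

    import modules.core as core  # assumed module path for the ksampler defined above

    def base_pass(xl_base, positive, negative, latent, switch_step=20, steps=30):
        # Old ksample(): add_noise=True, start_at_step=0, end_at_step=20,
        # return_with_leftover_noise=True -> keep leftover noise for the refiner.
        return core.ksampler(
            model=xl_base.unet, positive=positive, negative=negative, latent=latent,
            steps=steps, start_step=0, last_step=switch_step,
            disable_noise=False, force_full_denoise=False,
        )

    def refiner_pass(xl_refiner, positive, negative, latent, switch_step=20, steps=30):
        # Old ksample(): add_noise=False, start_at_step=20, end_at_step=30,
        # return_with_leftover_noise=False -> no new noise, denoise to completion.
        return core.ksampler(
            model=xl_refiner.unet, positive=positive, negative=negative, latent=latent,
            steps=steps, start_step=switch_step, last_step=steps,
            disable_noise=True, force_full_denoise=True,
        )
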