From 2f31d9e5a733ceba90100fd2220bcdac33d57ecd Mon Sep 17 00:00:00 2001
From: lllyasviel
Date: Tue, 3 Oct 2023 10:59:53 -0700
Subject: [PATCH] fix (#521)

* fix

* fix
---
 fooocus_version.py |  2 +-
 modules/core.py    | 10 ++++++++--
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/fooocus_version.py b/fooocus_version.py
index 14318f5..7230dbe 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '2.0.80'
+version = '2.0.81'
diff --git a/modules/core.py b/modules/core.py
index cf5128f..b5777b9 100644
--- a/modules/core.py
+++ b/modules/core.py
@@ -127,13 +127,19 @@ def generate_empty_latent(width=1024, height=1024, batch_size=1):
 @torch.no_grad()
 @torch.inference_mode()
 def decode_vae(vae, latent_image, tiled=False):
-    return (opVAEDecodeTiled if tiled else opVAEDecode).decode(samples=latent_image, vae=vae)[0]
+    if tiled:
+        return opVAEDecodeTiled.decode(samples=latent_image, vae=vae, tile_size=512)[0]
+    else:
+        return opVAEDecode.decode(samples=latent_image, vae=vae)[0]
 
 
 @torch.no_grad()
 @torch.inference_mode()
 def encode_vae(vae, pixels, tiled=False):
-    return (opVAEEncodeTiled if tiled else opVAEEncode).encode(pixels=pixels, vae=vae)[0]
+    if tiled:
+        return opVAEEncodeTiled.encode(pixels=pixels, vae=vae, tile_size=512)[0]
+    else:
+        return opVAEEncode.encode(pixels=pixels, vae=vae)[0]
 
 
 @torch.no_grad()
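
A minimal usage sketch of the two helpers this hunk touches (not part of the patch itself). It assumes Fooocus' modules.core is importable, that a loaded VAE object and a latent dict of the form {'samples': tensor} are already in hand from the sampling code, and that the function names below are hypothetical wrappers written only for illustration.

    from modules import core

    def decode_both_ways(vae, latent):
        # Plain path: one full-resolution VAE decode.
        image = core.decode_vae(vae, latent, tiled=False)
        # Tiled path: after this patch the tiled op is called explicitly with
        # tile_size=512, decoding in 512-pixel tiles to keep peak VRAM lower.
        image_tiled = core.decode_vae(vae, latent, tiled=True)
        return image, image_tiled

    def encode_both_ways(vae, pixels):
        # Mirrors decode: the tiled encode path now also passes tile_size=512.
        return (core.encode_vae(vae, pixels, tiled=False),
                core.encode_vae(vae, pixels, tiled=True))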