From a459075d26eecc38d6d58116e38f453450191460 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Tue, 22 Aug 2023 10:41:10 +0300
Subject: [PATCH] actual solution to the uncommon hanging problem that is
 seemingly caused by multiple progress requests working on same tensor

---
 modules/progress.py           | 41 +++++++++++++++++------------------
 modules/sd_samplers_common.py |  2 +-
 modules/shared_state.py       |  2 +-
 3 files changed, 22 insertions(+), 23 deletions(-)

diff --git a/modules/progress.py b/modules/progress.py
index a25a0113..69921de7 100644
--- a/modules/progress.py
+++ b/modules/progress.py
@@ -95,31 +95,30 @@ def progressapi(req: ProgressRequest):
     predicted_duration = elapsed_since_start / progress if progress > 0 else None
     eta = predicted_duration - elapsed_since_start if predicted_duration is not None else None
 
+    live_preview = None
     id_live_preview = req.id_live_preview
-    shared.state.set_current_image()
-    if opts.live_previews_enable and req.live_preview and shared.state.id_live_preview != req.id_live_preview:
-        image = shared.state.current_image
-        if image is not None:
-            buffered = io.BytesIO()
-            if opts.live_previews_image_format == "png":
-                # using optimize for large images takes an enormous amount of time
-                if max(*image.size) <= 256:
-                    save_kwargs = {"optimize": True}
+    if opts.live_previews_enable and req.live_preview:
+        shared.state.set_current_image()
+        if shared.state.id_live_preview != req.id_live_preview:
+            image = shared.state.current_image
+            if image is not None:
+                buffered = io.BytesIO()
+
+                if opts.live_previews_image_format == "png":
+                    # using optimize for large images takes an enormous amount of time
+                    if max(*image.size) <= 256:
+                        save_kwargs = {"optimize": True}
+                    else:
+                        save_kwargs = {"optimize": False, "compress_level": 1}
+
                 else:
-                    save_kwargs = {"optimize": False, "compress_level": 1}
+                    save_kwargs = {}
 
-            else:
-                save_kwargs = {}
-
-            image.save(buffered, format=opts.live_previews_image_format, **save_kwargs)
-            base64_image = base64.b64encode(buffered.getvalue()).decode('ascii')
-            live_preview = f"data:image/{opts.live_previews_image_format};base64,{base64_image}"
-            id_live_preview = shared.state.id_live_preview
-        else:
-            live_preview = None
-    else:
-        live_preview = None
+                image.save(buffered, format=opts.live_previews_image_format, **save_kwargs)
+                base64_image = base64.b64encode(buffered.getvalue()).decode('ascii')
+                live_preview = f"data:image/{opts.live_previews_image_format};base64,{base64_image}"
+                id_live_preview = shared.state.id_live_preview
 
     return ProgressResponse(active=active, queued=queued, completed=completed, progress=progress, eta=eta, live_preview=live_preview, id_live_preview=id_live_preview, textinfo=shared.state.textinfo)
 
 
diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index 64845ea4..60fa161c 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -111,7 +111,7 @@ def images_tensor_to_samples(image, approximation=None, model=None):
 
 
 def store_latent(decoded):
-    state.current_latent = decoded.clone()
+    state.current_latent = decoded
 
     if opts.live_previews_enable and opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
         if not shared.parallel_processing_allowed:
diff --git a/modules/shared_state.py b/modules/shared_state.py
index 3dc9c788..d272ee5b 100644
--- a/modules/shared_state.py
+++ b/modules/shared_state.py
@@ -128,7 +128,7 @@ class State:
         devices.torch_gc()
 
     def set_current_image(self):
-        """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
+        """if enough sampling steps have been made after the last call to this, sets self.current_image from self.current_latent, and modifies self.id_live_preview accordingly"""
 
         if not shared.parallel_processing_allowed:
             return
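
Reviewer note, not part of the patch: after this change, progressapi only calls shared.state.set_current_image() when the request actually asks for a preview (opts.live_previews_enable and req.live_preview), so progress polls that skip previews no longer touch the tensor the sampler is writing to, and store_latent no longer needs the defensive clone. A minimal client-side sketch of such a poll follows. It assumes a locally running webui on the default address, that the handler is mounted at /internal/progress, and that the request body carries an id_task field next to the id_live_preview/live_preview fields used above; the base URL, the endpoint path, and id_task are assumptions, not taken from this patch.

import time

import requests

BASE_URL = "http://127.0.0.1:7860"  # assumed local webui address


def poll_progress(id_task: str, interval: float = 1.0) -> None:
    """Poll task progress without requesting a live preview, so the server
    skips set_current_image() entirely for these requests."""
    while True:
        resp = requests.post(
            f"{BASE_URL}/internal/progress",  # assumed endpoint path
            json={
                "id_task": id_task,       # assumed field name for the task id
                "id_live_preview": -1,
                "live_preview": False,    # do not ask the server for a preview image
            },
            timeout=10,
        )
        resp.raise_for_status()
        data = resp.json()

        print(f"progress={data.get('progress')} eta={data.get('eta')} textinfo={data.get('textinfo')}")

        # active, queued and completed mirror the ProgressResponse fields returned above.
        if data.get("completed") or not (data.get("active") or data.get("queued")):
            break

        time.sleep(interval)

Keeping live_preview false keeps each poll cheap on the server side, which is exactly what this patch makes possible for requests that were previously forced through set_current_image(); clients that do want previews should pass live_preview true and echo back the last id_live_preview they received, as the comparison against req.id_live_preview above suggests.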