mirror of
https://github.com/sd-webui/stable-diffusion-webui.git
synced 2024-12-14 14:52:31 +03:00
Job Manager feature - view images before all are complete, cancel ongoing generations (#460)
* Add max-jobs command line argument
Adds a new command line argument, max-jobs, which will set the number
of concurrent jobs the gradio queue will allow. When set to more than
the default of 1 the gradio UI will be able to process additional
UI commands at the same time.
* JobManager: initial txt2img implementation
Initial implementation of JobManager, applied to txt2img.
Adds 'refresh' and 'cancel' buttons to the UI. These are useful when
generating images with large batch counts. The 'refresh' button will
update the gallery with the currently-generated images, and the cancel
button will cause the generation to stop after the current iteration.
The new job manager can be disabled with the parameter
--no-job-manager
* JobManager: Add status update text
* JobManager: Replace wrapped inputs as well
* JobManager: Per-session unique keys
* JobManager: Pre and Post call funcs, UI updates
Added pre- and post- function call 'dummy objects' to allow updating
the UI before and after a generate run. Update the visuals of the
buttons and status text in these new functions.
* JobManager: enforce maximum jobs
* JobManager: Move 'call' func code block
It just makes more sense between _pre and _post.
* JobManager: Add session management
Adds support for multiple browser sessions.
A single session cannot run the same job twice.
If there are no available jobs when Generate is clicked, the
generation aborts. It does *not* queue.
* JobManager: add session maintenance
Added the ability for one session to stop all concurrent sessions,
and to free memory from any 'finished' sessions for which the
browser has been closed (as the images will be stored until the
browser does a final 'refresh' after the job finishes, which will
never happen if the browser closed)
* JobManager: Add img2img support
This *should* add JobManager to img2img, but it is untested
since img2img is broken for me even without my changes.
* Fixed img2img functionality on this pr
* Revert "Fixed img2img functionality on this pr"
This reverts commit 649b1e8e65.
* Img2Img: Fix 'image editor' options not visible
* Fix Img2Img Job Manager integration
* Img2Img UI: Move JobManager above Image Actions
It is helpful if it is on the screen when you hit generate, so
you can notice the button light up when generation starts.
* Improve job status text
* JobManager: Free available job on exception
* JobManager: Add queueing
Adds a simple queueing system to JobManager. If max-jobs concurrent
jobs are already active then any subsequent jobs will block until
a slot frees up.
Note: The UI does not give great feedback to this. The JobManager
status box will say "Loading..."
* JobManager: Fix queue accidentally LIFO
Queues should really be first in, first out.
* JobManager: add draw_gradio_ui function
Reduces a lot of boilerplate code in frontend.py
Co-authored-by: hlky <106811348+hlky@users.noreply.github.com>
This commit is contained in:
parent
e35fcb911f
commit
b969428590
@ -2,17 +2,21 @@ import sys
|
||||
from tkinter.filedialog import askopenfilename
|
||||
import gradio as gr
|
||||
from frontend.css_and_js import css, js, call_JS, js_parse_prompt, js_copy_txt2img_output
|
||||
from frontend.job_manager import JobManager
|
||||
import frontend.ui_functions as uifn
|
||||
import uuid
|
||||
try:
|
||||
import pyperclip
|
||||
except ImportError:
|
||||
print("Warning: pyperclip is not installed. Pasting settings is unavailable.", file=sys.stderr)
|
||||
pyperclip = None
|
||||
|
||||
|
||||
def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda x: x, txt2img_defaults={}, RealESRGAN=True, GFPGAN=True,LDSR=True,
|
||||
txt2img_toggles={}, txt2img_toggle_defaults='k_euler', show_embeddings=False, img2img_defaults={},
|
||||
img2img_toggles={}, img2img_toggle_defaults={}, sample_img2img=None, img2img_mask_modes=None,
|
||||
img2img_resize_modes=None, imgproc_defaults={},imgproc_mode_toggles={},user_defaults={}, run_GFPGAN=lambda x: x, run_RealESRGAN=lambda x: x):
|
||||
img2img_resize_modes=None, imgproc_defaults={},imgproc_mode_toggles={},user_defaults={}, run_GFPGAN=lambda x: x, run_RealESRGAN=lambda x: x,
|
||||
job_manager: JobManager = None) -> gr.Blocks:
|
||||
|
||||
with gr.Blocks(css=css(opt), analytics_enabled=False, title="Stable Diffusion WebUI") as demo:
|
||||
with gr.Tabs(elem_id='tabss') as tabs:
|
||||
@ -26,7 +30,6 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
value=txt2img_defaults['prompt'],
|
||||
show_label=False)
|
||||
txt2img_btn = gr.Button("Generate", elem_id="generate", variant="primary")
|
||||
|
||||
with gr.Row(elem_id='body').style(equal_height=False):
|
||||
with gr.Column():
|
||||
txt2img_width = gr.Slider(minimum=64, maximum=1024, step=64, label="Width",
|
||||
@ -42,6 +45,8 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
label='Number of images to generate',
|
||||
value=txt2img_defaults['n_iter'])
|
||||
|
||||
txt2img_job_ui = job_manager.draw_gradio_ui() if job_manager else None
|
||||
|
||||
txt2img_dimensions_info_text_box = gr.Textbox(label="Aspect ratio (4:3 = 1.333 | 16:9 = 1.777 | 21:9 = 2.333)")
|
||||
with gr.Column():
|
||||
with gr.Box():
|
||||
@ -108,19 +113,31 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
txt2img_embeddings = gr.File(label="Embeddings file for textual inversion",
|
||||
visible=show_embeddings)
|
||||
|
||||
txt2img_func = txt2img
|
||||
txt2img_inputs = [txt2img_prompt, txt2img_steps, txt2img_sampling, txt2img_toggles,
|
||||
txt2img_realesrgan_model_name, txt2img_ddim_eta, txt2img_batch_count,
|
||||
txt2img_batch_size, txt2img_cfg, txt2img_seed, txt2img_height, txt2img_width,
|
||||
txt2img_embeddings, txt2img_variant_amount, txt2img_variant_seed]
|
||||
txt2img_outputs = [output_txt2img_gallery, output_txt2img_seed,
|
||||
output_txt2img_params, output_txt2img_stats]
|
||||
|
||||
# If a JobManager was passed in then wrap the Generate functions
|
||||
if txt2img_job_ui:
|
||||
txt2img_func, txt2img_inputs, txt2img_outputs = txt2img_job_ui.wrap_func(
|
||||
func=txt2img_func,
|
||||
inputs=txt2img_inputs,
|
||||
outputs=txt2img_outputs
|
||||
)
|
||||
|
||||
txt2img_btn.click(
|
||||
txt2img,
|
||||
[txt2img_prompt, txt2img_steps, txt2img_sampling, txt2img_toggles, txt2img_realesrgan_model_name,
|
||||
txt2img_ddim_eta, txt2img_batch_count, txt2img_batch_size, txt2img_cfg, txt2img_seed,
|
||||
txt2img_height, txt2img_width, txt2img_embeddings, txt2img_variant_amount, txt2img_variant_seed],
|
||||
[output_txt2img_gallery, output_txt2img_seed, output_txt2img_params, output_txt2img_stats]
|
||||
txt2img_func,
|
||||
txt2img_inputs,
|
||||
txt2img_outputs
|
||||
)
|
||||
txt2img_prompt.submit(
|
||||
txt2img,
|
||||
[txt2img_prompt, txt2img_steps, txt2img_sampling, txt2img_toggles, txt2img_realesrgan_model_name,
|
||||
txt2img_ddim_eta, txt2img_batch_count, txt2img_batch_size, txt2img_cfg, txt2img_seed,
|
||||
txt2img_height, txt2img_width, txt2img_embeddings, txt2img_variant_amount, txt2img_variant_seed],
|
||||
[output_txt2img_gallery, output_txt2img_seed, output_txt2img_params, output_txt2img_stats]
|
||||
txt2img_func,
|
||||
txt2img_inputs,
|
||||
txt2img_outputs
|
||||
)
|
||||
txt2img_settings_elements = [
|
||||
txt2img_prompt, txt2img_steps, txt2img_sampling, txt2img_toggles, txt2img_realesrgan_model_name,
|
||||
@ -187,7 +204,7 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
|
||||
with gr.Tabs():
|
||||
with gr.TabItem("Editor Options"):
|
||||
with gr.Column():
|
||||
with gr.Row():
|
||||
img2img_image_editor_mode = gr.Radio(choices=["Mask", "Crop", "Uncrop"], label="Image Editor Mode",
|
||||
value="Crop", elem_id='edit_mode_select')
|
||||
img2img_mask = gr.Radio(choices=["Keep masked area", "Regenerate only masked area"],
|
||||
@ -211,6 +228,7 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
with gr.Column():
|
||||
gr.Markdown('#### Img2Img Results')
|
||||
output_img2img_gallery = gr.Gallery(label="Images", elem_id="img2img_gallery_output").style(grid=[4,4,4])
|
||||
img2img_job_ui = job_manager.draw_gradio_ui() if job_manager else None
|
||||
with gr.Tabs():
|
||||
with gr.TabItem("Generated image actions", id="img2img_actions_tab"):
|
||||
gr.Markdown("Select an image, then press one of the buttons below")
|
||||
@ -231,6 +249,7 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
inputs=output_img2img_seed, outputs=[],
|
||||
_js=call_JS("gradioInputToClipboard"), fn=None, show_progress=False)
|
||||
output_img2img_stats = gr.HTML(label='Stats')
|
||||
|
||||
gr.Markdown('# img2img settings')
|
||||
|
||||
with gr.Row():
|
||||
@ -316,24 +335,31 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
fromId="img2img_gallery_output")
|
||||
)
|
||||
|
||||
img2img_func = img2img
|
||||
img2img_inputs = [img2img_prompt, img2img_image_editor_mode, img2img_image_editor, img2img_mask,
|
||||
img2img_mask_blur_strength, img2img_steps, img2img_sampling, img2img_toggles,
|
||||
img2img_realesrgan_model_name, img2img_batch_count, img2img_cfg,
|
||||
img2img_denoising, img2img_seed, img2img_height, img2img_width, img2img_resize,
|
||||
img2img_embeddings]
|
||||
img2img_outputs = [output_img2img_gallery, output_img2img_seed, output_img2img_params, output_img2img_stats]
|
||||
|
||||
# If a JobManager was passed in then wrap the Generate functions
|
||||
if img2img_job_ui:
|
||||
img2img_func, img2img_inputs, img2img_outputs = img2img_job_ui.wrap_func(
|
||||
func=img2img_func,
|
||||
inputs=img2img_inputs,
|
||||
outputs=img2img_outputs,
|
||||
)
|
||||
|
||||
img2img_btn_mask.click(
|
||||
img2img,
|
||||
[img2img_prompt, img2img_image_editor_mode, img2img_image_mask, img2img_mask,
|
||||
img2img_mask_blur_strength, img2img_steps, img2img_sampling, img2img_toggles,
|
||||
img2img_realesrgan_model_name, img2img_batch_count, img2img_cfg,
|
||||
img2img_denoising, img2img_seed, img2img_height, img2img_width, img2img_resize,
|
||||
img2img_embeddings],
|
||||
[output_img2img_gallery, output_img2img_seed, output_img2img_params, output_img2img_stats]
|
||||
img2img_func,
|
||||
img2img_inputs,
|
||||
img2img_outputs
|
||||
)
|
||||
def img2img_submit_params():
|
||||
return (img2img,
|
||||
[img2img_prompt, img2img_image_editor_mode, img2img_image_editor, img2img_mask,
|
||||
img2img_mask_blur_strength, img2img_steps, img2img_sampling, img2img_toggles,
|
||||
img2img_realesrgan_model_name, img2img_batch_count, img2img_cfg,
|
||||
img2img_denoising, img2img_seed, img2img_height, img2img_width, img2img_resize,
|
||||
img2img_embeddings],
|
||||
[output_img2img_gallery, output_img2img_seed, output_img2img_params, output_img2img_stats])
|
||||
|
||||
return (img2img_func,
|
||||
img2img_inputs,
|
||||
img2img_outputs)
|
||||
|
||||
img2img_btn_editor.click(*img2img_submit_params())
|
||||
|
||||
|
394
frontend/job_manager.py
Normal file
394
frontend/job_manager.py
Normal file
@ -0,0 +1,394 @@
|
||||
''' Provides simple job management for gradio, allowing viewing and stopping in-progress multi-batch generations '''
|
||||
from __future__ import annotations
|
||||
import gradio as gr
|
||||
from gradio.components import Component, Gallery
|
||||
from threading import Event, Timer
|
||||
from typing import Callable, List, Dict, Tuple, Optional, Any
|
||||
from dataclasses import dataclass, field
|
||||
from functools import partial
|
||||
from PIL.Image import Image
|
||||
import uuid
|
||||
|
||||
|
||||
@dataclass(eq=True, frozen=True)
class FuncKey:
    ''' Hashable identity for one wrapped UI function.

    Frozen/eq so instances can serve as dictionary keys in
    SessionInfo.jobs and SessionInfo.finished_jobs.
    '''
    # Unique id assigned when the function is wrapped.
    # NOTE(review): _wrap_func passes uuid.uuid4() (a UUID object), not a str,
    # so this annotation is inaccurate; it is only ever used as a hashable key.
    job_id: str
    func: Callable  # the original, unwrapped event-listener function
|
||||
|
||||
|
||||
@dataclass(eq=True, frozen=True)
class JobKey:
    ''' Composite key pairing a wrapped function with a browser session.

    NOTE(review): not referenced anywhere in this file — possibly dead code;
    confirm before removing.
    '''
    func_key: FuncKey
    session_key: str
|
||||
|
||||
|
||||
@dataclass
class JobInfo:
    ''' Mutable state for one in-flight (or just-finished) generation job. '''
    inputs: List[Component]  # positional args captured from the UI (session key already stripped off)
    func: Callable  # the real generation function to invoke
    session_key: str  # id of the browser session that started the job
    job_token: Optional[int] = None  # concurrency slot id; None means the job is still queued
    images: List[Image] = field(default_factory=list)  # intermediate results, delivered on gallery refresh
    should_stop: Event = field(default_factory=Event)  # set to request an early stop after the current batch
    job_status: str = field(default_factory=str)  # human-readable progress text shown in the status box
    finished: bool = False  # True once func has returned
    removed_output_idxs: List[int] = field(default_factory=list)  # output slots (the gallery) stripped from func's return
|
||||
|
||||
|
||||
@dataclass
class SessionInfo:
    ''' Per-browser-session bookkeeping of active and completed jobs. '''
    jobs: Dict[FuncKey, JobInfo] = field(default_factory=dict)  # currently-running jobs
    finished_jobs: Dict[FuncKey, JobInfo] = field(default_factory=dict)  # done, retained until the final UI refresh
|
||||
|
||||
|
||||
@dataclass
class QueueItem:
    ''' Entry in JobManager._job_queue; wait_event is set when a job slot frees up. '''
    wait_event: Event
|
||||
|
||||
|
||||
def triggerChangeEvent():
    ''' Produce a fresh random 32-char hex token.

    Writing this token into a hidden gr.JSON component changes its value,
    which fires that component's 'change' event. '''
    return "%032x" % uuid.uuid4().int
|
||||
|
||||
|
||||
@dataclass
class JobManagerUi:
    ''' Handle to the Job Manager controls drawn by JobManager.draw_gradio_ui.

    Holds the gradio components (buttons and status text) plus a back-reference
    to the owning JobManager, and exposes wrap_func to attach them to a
    generate function.
    '''
    def wrap_func(
            self,
            func: Callable,
            inputs: List[Component],
            outputs: List[Component]) -> Tuple[Callable, List[Component], List[Component]]:
        ''' Takes a gradio event listener function and its input/outputs and returns wrapped replacements which will
        be managed by JobManager.

        Parameters:
            func (Callable): the original event listener to be wrapped.
                This listener should be modified to take a 'job_info' parameter which, if not None, can
                be used by the function to check for stop events and to store intermediate image results
            inputs (List[Component]): the original inputs
            outputs (List[Component]): the original outputs. The first gallery, if any, will be used
                for refreshing images

        Returns:
            Tuple of (newFunc (Callable), newInputs (List[Component]), newOutputs (List[Component])),
            which should be used as replacements for the passed-in function, inputs and outputs
        '''
        return self._job_manager._wrap_func(
            func=func, inputs=inputs, outputs=outputs,
            refresh_btn=self._refresh_btn, stop_btn=self._stop_btn, status_text=self._status_text
        )

    # UI components created in JobManager.draw_gradio_ui
    _refresh_btn: gr.Button  # pulls intermediate images/status into the UI
    _stop_btn: gr.Button  # requests the running job stop after its current batch
    _status_text: gr.Textbox  # job status display
    # NOTE(review): no click handlers are attached to the two maintenance
    # buttons below anywhere in this file — confirm they are wired elsewhere.
    _stop_all_session_btn: gr.Button
    _free_done_sessions_btn: gr.Button
    _job_manager: JobManager  # owning manager
|
||||
|
||||
|
||||
class JobManager:
    ''' Tracks and limits concurrent generation jobs across browser sessions,
    and builds the gradio UI used to monitor and stop them. '''

    def __init__(self, max_jobs: int):
        '''
        Parameters:
            max_jobs (int): maximum number of jobs allowed to run concurrently
        '''
        self._max_jobs: int = max_jobs
        # Pool of job slots: a job must pop a token to run and returns it when done
        self._avail_job_tokens: List[Any] = list(range(max_jobs))
        # FIFO of jobs blocked waiting for a token (see _get_job_token / _run_queued_jobs)
        self._job_queue: List[QueueItem] = []
        # Per-browser-session state, keyed by each session's unique id
        self._sessions: Dict[str, SessionInfo] = {}
        # Hidden gr.JSON that resolves to a unique per-session id; created lazily in _wrap_func
        self._session_key: gr.JSON = None
|
||||
|
||||
def draw_gradio_ui(self) -> JobManagerUi:
    ''' draws the job manager ui in gradio

    Must be called inside a 'gr.Blocks' 'with' context.

    Returns:
        ui (JobManagerUi): object which can connect functions to the ui
    '''
    assert gr.context.Context.block is not None, "draw_gradio_ui must be called within a 'gr.Blocks' 'with' context"
    with gr.Tabs():
        with gr.TabItem("Current Session"):
            with gr.Row():
                # Per-session controls: stop the running job / pull intermediate images
                stop_btn = gr.Button("Stop", elem_id="stop", variant="secondary")
                refresh_btn = gr.Button("Refresh", elem_id="refresh", variant="secondary")
                status_text = gr.Textbox(placeholder="Job Status", interactive=False, show_label=False)
        with gr.TabItem("Maintenance"):
            with gr.Row():
                gr.Markdown(
                    "Stop all concurrent sessions, or free memory associated with jobs which were finished after the browser was closed")
            with gr.Row():
                # NOTE(review): no click handlers are attached to these two
                # buttons in this file — confirm they are wired up elsewhere.
                stop_all_sessions_btn = gr.Button(
                    "Stop All Sessions", elem_id="stop_all", variant="secondary"
                )
                free_done_sessions_btn = gr.Button(
                    "Clear Finished Jobs", elem_id="clear_finished", variant="secondary"
                )
    return JobManagerUi(_refresh_btn=refresh_btn, _stop_btn=stop_btn, _status_text=status_text,
                        _stop_all_session_btn=stop_all_sessions_btn, _free_done_sessions_btn=free_done_sessions_btn,
                        _job_manager=self)
|
||||
|
||||
def clear_all_finished_jobs(self):
|
||||
''' Removes all currently finished jobs, across all sessions.
|
||||
Useful to free memory if a job is started and the browser is closed
|
||||
before it finishes '''
|
||||
for session in self._sessions.values():
|
||||
session.finished_jobs.clear()
|
||||
|
||||
def stop_all_jobs(self):
|
||||
''' Stops all active jobs, across all sessions'''
|
||||
for session in self._sessions.values():
|
||||
for job in session.jobs.values():
|
||||
job.should_stop.set()
|
||||
|
||||
def _get_job_token(self, block: bool = False) -> Optional[int]:
    ''' Attempts to acquire a job token, optionally blocking until available.

    Returns the token (an int slot id), or None when not blocking and no
    slot is free.

    NOTE(review): the token pool and queue are plain lists with no lock.
    list.pop/append are individually atomic in CPython, but the
    pop-fails-then-enqueue sequence below is not; two blocked callers could
    race for a freed slot. Confirm this is acceptable.
    '''
    token = None
    while token is None:
        try:
            # pop() raises IndexError when no tokens are free
            token = self._avail_job_tokens.pop()
            break
        except IndexError:
            pass

        if not block:
            break

        # No token and requested to block, so queue up.
        # _release_job_token/_run_queued_jobs will set the event when a slot
        # frees up; the loop then retries the pop.
        wait_event = Event()
        self._job_queue.append(QueueItem(wait_event))
        wait_event.wait()

    return token
|
||||
|
||||
def _release_job_token(self, token: int) -> None:
    ''' Returns a job token to the pool, then wakes the next queued job
    (if any) so it can claim it. '''
    self._avail_job_tokens.append(token)
    self._run_queued_jobs()
|
||||
|
||||
def _refresh_func(self, func_key: FuncKey, session_key: str) -> List[Component]:
|
||||
''' Updates information from the active job '''
|
||||
session_info, job_info = self._get_call_info(func_key, session_key)
|
||||
if job_info is None:
|
||||
return [None, f"Session {session_key} was not running function {func_key}"]
|
||||
return [triggerChangeEvent(), job_info.job_status]
|
||||
|
||||
def _stop_wrapped_func(self, func_key: FuncKey, session_key: str) -> List[Component]:
|
||||
''' Marks that the job should be stopped'''
|
||||
session_info, job_info = self._get_call_info(func_key, session_key)
|
||||
if job_info is None:
|
||||
return f"Session {session_key} was not running function {func_key}"
|
||||
job_info.should_stop.set()
|
||||
return "Stopping after current batch finishes"
|
||||
|
||||
def _get_call_info(self, func_key: FuncKey, session_key: str) -> Tuple[SessionInfo, JobInfo]:
|
||||
''' Helper to get the SessionInfo and JobInfo. '''
|
||||
session_info = self._sessions.get(session_key, None)
|
||||
if not session_info:
|
||||
print(f"Couldn't find session {session_key} for call to {func_key}")
|
||||
return None, None
|
||||
|
||||
job_info = session_info.jobs.get(func_key, None)
|
||||
if not job_info:
|
||||
job_info = session_info.finished_jobs.get(func_key, None)
|
||||
if not job_info:
|
||||
print(f"Couldn't find job {func_key} in session {session_key}")
|
||||
return session_info, None
|
||||
|
||||
return session_info, job_info
|
||||
|
||||
def _run_queued_jobs(self) -> None:
    ''' Runs queued jobs for any available slots.

    Wakes at most one waiter per call; the woken job re-attempts the token
    pop itself (see _get_job_token). A timer re-runs this check shortly
    afterwards in case the woken waiter's browser was closed while it was
    still queued and the slot would otherwise go unused. '''
    if self._avail_job_tokens:
        try:
            # Notify next queued job it may begin (FIFO: pop from the front)
            queue_item = self._job_queue.pop(0)
            queue_item.wait_event.set()

            # Check again in a few seconds, just in case the queued
            # waiter closed the browser while still queued
            Timer(3.0, self._run_queued_jobs).start()
        except IndexError:
            pass  # No queued jobs
|
||||
|
||||
def _pre_call_func(
        self, func_key: FuncKey, output_dummy_obj: Component, refresh_btn: gr.Button, stop_btn: gr.Button,
        status_text: gr.Textbox, session_key: str) -> List[Component]:
    ''' Called just before a job runs: ensure the job holds a concurrency
    token (blocking for one if it was queued) and switch the UI into its
    'running' state.

    Writing to output_dummy_obj fires its 'change' event, which triggers the
    actual run (_call_func) in the pre -> run -> post chain built by
    _wrap_func. '''
    session_info, job_info = self._get_call_info(func_key, session_key)

    # If we didn't already get a token then queue up for one.
    # BUG FIX: this previously assigned the acquired token to a nonexistent
    # 'job_info.token' attribute, leaving job_token None. _call_func would
    # then release None into the token pool, leaking the real slot and
    # handing out a bogus None token to a later job.
    if job_info.job_token is None:
        job_info.job_token = self._get_job_token(block=True)

    # Buttons don't seem to update unless value is set on them as well...
    return {output_dummy_obj: triggerChangeEvent(),
            refresh_btn: gr.Button.update(variant="primary", value=refresh_btn.value),
            stop_btn: gr.Button.update(variant="primary", value=stop_btn.value),
            status_text: gr.Textbox.update(value="Generation has started. Click 'Refresh' for updates")
            }
|
||||
|
||||
def _call_func(self, func_key: FuncKey, session_key: str) -> List[Component]:
    ''' Runs the real function with job management.

    Invokes the wrapped function with a 'job_info' kwarg so it can report
    progress and honor stop requests, filters out outputs that _wrap_func
    removed (the gallery), moves the job to the session's finished table,
    frees its concurrency token, and appends a change-event token to fire
    the post-call dummy object. '''
    session_info, job_info = self._get_call_info(func_key, session_key)
    if session_info is None or job_info is None:
        return []

    try:
        outputs = job_info.func(*job_info.inputs, job_info=job_info)
    except Exception as e:
        # Deliberately broad: surface any failure in the job status (shown on
        # 'Refresh') rather than crashing the gradio worker
        job_info.job_status = f"Error: {e}"
        print(f"Exception processing job {job_info}: {e}")
        outputs = []

    # Filter the function output for any removed outputs
    filtered_output = []
    for idx, output in enumerate(outputs):
        if idx not in job_info.removed_output_idxs:
            filtered_output.append(output)

    job_info.finished = True
    session_info.finished_jobs[func_key] = session_info.jobs.pop(func_key)

    # ROBUSTNESS FIX: only release a token the job actually holds. Releasing
    # None would append a bogus 'None' slot to the token pool, which a later
    # job would receive and treat as "queued" (job_token is None).
    if job_info.job_token is not None:
        self._release_job_token(job_info.job_token)

    # The wrapper added a dummy JSON output. Append a random text string
    # to fire the dummy objects 'change' event to notify that the job is done
    filtered_output.append(triggerChangeEvent())

    return tuple(filtered_output)
|
||||
|
||||
def _post_call_func(
        self, func_key: FuncKey, output_dummy_obj: Component, refresh_btn: gr.Button, stop_btn: gr.Button,
        status_text: gr.Textbox, session_key: str) -> List[Component]:
    ''' Called when a job completes: restore the buttons to their idle
    (secondary) look, update the status text, and fire the gallery-update
    dummy object for one final image refresh. '''
    updates = {output_dummy_obj: triggerChangeEvent()}
    updates[refresh_btn] = gr.Button.update(variant="secondary", value=refresh_btn.value)
    updates[stop_btn] = gr.Button.update(variant="secondary", value=stop_btn.value)
    updates[status_text] = gr.Textbox.update(value="Generation has finished!")
    return updates
|
||||
|
||||
def _update_gallery_event(self, func_key: FuncKey, session_key: str) -> List[Component]:
|
||||
''' Updates the gallery with results from the given job.
|
||||
Frees the images after return if the job is finished.
|
||||
Triggered by changing the update_gallery_obj dummy object '''
|
||||
session_info, job_info = self._get_call_info(func_key, session_key)
|
||||
if session_info is None or job_info is None:
|
||||
return []
|
||||
|
||||
if job_info.finished:
|
||||
session_info.finished_jobs.pop(func_key)
|
||||
|
||||
return job_info.images
|
||||
|
||||
def _wrap_func(
        self, func: Callable, inputs: List[Component], outputs: List[Component],
        refresh_btn: gr.Button = None, stop_btn: gr.Button = None,
        status_text: Optional[gr.Textbox] = None) -> Tuple[Callable, List[Component], List[Component]]:
    ''' Implementation behind JobManagerUi.wrap_func: replace a gradio event
    listener with a job-managed version.

    Builds a chain of hidden gr.JSON 'dummy' components (pre -> run -> post)
    so the UI can be updated before and after the real call, wires the
    refresh/stop buttons to this job, and returns the replacement
    (function, inputs, outputs) triple to register with gradio.

    NOTE(review): mutates the caller's 'inputs' and 'outputs' lists in place
    (appends the session key / deletes the gallery entry) — callers must use
    the returned lists, not the originals.
    '''

    assert gr.context.Context.block is not None, "wrap_func must be called within a 'gr.Blocks' 'with' context"

    # Create a unique key for this job
    # NOTE(review): FuncKey.job_id is annotated str but receives a uuid.UUID
    # here; it is only used as a hashable key, so this works as-is.
    func_key = FuncKey(job_id=uuid.uuid4(), func=func)

    # Create a unique session key (next gradio release can use gr.State, see https://gradio.app/state_in_blocks/)
    if self._session_key is None:
        # When this gradio object is received as an event handler input it will resolve to a unique per-session id
        self._session_key = gr.JSON(value=lambda: uuid.uuid4().hex, visible=False,
                                    elem_id="JobManagerDummyObject_sessionKey")

    # Pull the gallery out of the original outputs and assign it to the gallery update dummy object
    gallery_comp = None
    removed_idxs = []
    for idx, comp in enumerate(outputs):
        if isinstance(comp, Gallery):
            removed_idxs.append(idx)
            gallery_comp = comp
            del outputs[idx]
            break  # only the first gallery is managed

    # Add the session key to the inputs
    inputs += [self._session_key]

    # Create dummy objects. Changing update_gallery_obj refreshes the gallery
    # with the job's current images (see _update_gallery_event).
    update_gallery_obj = gr.JSON(visible=False, elem_id="JobManagerDummyObject")
    update_gallery_obj.change(
        partial(self._update_gallery_event, func_key),
        [self._session_key],
        [gallery_comp]
    )

    if refresh_btn:
        refresh_btn.variant = 'secondary'
        refresh_btn.click(
            partial(self._refresh_func, func_key),
            [self._session_key],
            [update_gallery_obj, status_text]
        )

    if stop_btn:
        stop_btn.variant = 'secondary'
        stop_btn.click(
            partial(self._stop_wrapped_func, func_key),
            [self._session_key],
            [status_text]
        )

    # (ab)use gr.JSON to forward events.
    # The gr.JSON object will fire its 'change' event when it is modified by being the output
    # of another component. This allows a method to forward events and allow multiple components
    # to update the gallery (without locking it).

    # For example, the update_gallery_obj will update the gallery as in output of its 'change' event.
    # When its content changes it will update the gallery with the most recent images available from
    # the JobInfo. Now, eg, testComponent can have update_gallery_obj as an output and write random text
    # to it. This will trigger an update to the gallery, but testComponent didn't need to have
    # update_gallery_obj listed as an output, which would have locked it.

    # Since some parameters are optional it makes sense to use the 'dict' return value type, which requires
    # the Component as a key... so group together the UI components that the event listeners are going to update
    # to make it easy to append to function calls and outputs
    job_ui_params = [refresh_btn, stop_btn, status_text]
    job_ui_outputs = [comp for comp in job_ui_params if comp is not None]

    # Here a chain is constructed that will make a 'pre' call, a 'run' call, and a 'post' call,
    # to be able to update the UI before and after, as well as run the actual call
    post_call_dummyobj = gr.JSON(visible=False, elem_id="JobManagerDummyObject_postCall")
    post_call_dummyobj.change(
        partial(self._post_call_func, func_key, update_gallery_obj, *job_ui_params),
        [self._session_key],
        [update_gallery_obj] + job_ui_outputs
    )

    call_dummyobj = gr.JSON(visible=False, elem_id="JobManagerDummyObject_runCall")
    call_dummyobj.change(
        partial(self._call_func, func_key),
        [self._session_key],
        outputs + [post_call_dummyobj]
    )

    pre_call_dummyobj = gr.JSON(visible=False, elem_id="JobManagerDummyObject_preCall")
    pre_call_dummyobj.change(
        partial(self._pre_call_func, func_key, call_dummyobj, *job_ui_params),
        [self._session_key],
        [call_dummyobj] + job_ui_outputs
    )

    # Now replace the original function with one that creates a JobInfo and triggers the dummy obj

    def wrapped_func(*inputs):
        # The hidden session-key component arrives as the last input
        session_key = inputs[-1]
        inputs = inputs[:-1]

        # Get or create a session for this key
        session_info = self._sessions.setdefault(session_key, SessionInfo())

        # Is this session already running this job?
        if func_key in session_info.jobs:
            return {status_text: "This session is already running that function!"}

        # Non-blocking token grab; a None token means the job is queued and
        # _pre_call_func will block for a free slot
        job_token = self._get_job_token(block=False)
        job = JobInfo(inputs=inputs, func=func, removed_output_idxs=removed_idxs, session_key=session_key,
                      job_token=job_token)
        session_info.jobs[func_key] = job

        # Kick off the pre/run/post chain by firing the 'pre' dummy object
        ret = {pre_call_dummyobj: triggerChangeEvent()}
        if job_token is None:
            ret[status_text] = "Job is queued"
        return ret

    return wrapped_func, inputs, [pre_call_dummyobj, status_text]
|
50
webui.py
50
webui.py
@ -1,8 +1,9 @@
|
||||
import argparse, os, sys, glob, re
|
||||
|
||||
from frontend.frontend import draw_gradio_ui
|
||||
from frontend.job_manager import JobManager, JobInfo
|
||||
from frontend.ui_functions import resize_image
|
||||
parser = argparse.ArgumentParser()
|
||||
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
|
||||
parser.add_argument("--ckpt", type=str, default="models/ldm/stable-diffusion-v1/model.ckpt", help="path to checkpoint of model",)
|
||||
parser.add_argument("--cli", type=str, help="don't launch web server, take Python function kwargs from this file.", default=None)
|
||||
parser.add_argument("--config", type=str, default="configs/stable-diffusion/v1-inference.yaml", help="path to config which constructs model",)
|
||||
@ -36,6 +37,8 @@ parser.add_argument("--share-password", type=str, help="Sharing is open by defau
|
||||
parser.add_argument("--share", action='store_true', help="Should share your server on gradio.app, this allows you to use the UI from your mobile app", default=False)
|
||||
parser.add_argument("--skip-grid", action='store_true', help="do not save a grid, only individual samples. Helpful when evaluating lots of samples", default=False)
|
||||
parser.add_argument("--skip-save", action='store_true', help="do not save indiviual samples. For speed measurements.", default=False)
|
||||
parser.add_argument('--no-job-manager', action='store_true', help="Don't use the experimental job manager on top of gradio", default=False)
|
||||
parser.add_argument("--max-jobs", type=int, help="Maximum number of concurrent 'generate' commands", default=1)
|
||||
opt = parser.parse_args()
|
||||
|
||||
#Should not be needed anymore
|
||||
@ -60,7 +63,7 @@ import torch
|
||||
import torch.nn as nn
|
||||
import yaml
|
||||
import glob
|
||||
from typing import List, Union
|
||||
from typing import List, Union, Dict
|
||||
from pathlib import Path
|
||||
from collections import namedtuple
|
||||
|
||||
@ -104,6 +107,12 @@ LDSR_dir = opt.ldsr_dir
|
||||
if opt.optimized_turbo:
|
||||
opt.optimized = True
|
||||
|
||||
if opt.no_job_manager:
|
||||
job_manager = None
|
||||
else:
|
||||
job_manager = JobManager(opt.max_jobs)
|
||||
opt.max_jobs += 1 # Leave a free job open for button clicks
|
||||
|
||||
# should probably be moved to a settings menu in the UI at some point
|
||||
grid_format = [s.lower() for s in opt.grid_format.split(':')]
|
||||
grid_lossless = False
|
||||
@ -754,7 +763,7 @@ def process_images(
|
||||
fp, ddim_eta=0.0, do_not_save_grid=False, normalize_prompt_weights=True, init_img=None, init_mask=None,
|
||||
keep_mask=False, mask_blur_strength=3, denoising_strength=0.75, resize_mode=None, uses_loopback=False,
|
||||
uses_random_seed_loopback=False, sort_samples=True, write_info_files=True, write_sample_info_to_log_file=False, jpg_sample=False,
|
||||
variant_amount=0.0, variant_seed=None,imgProcessorTask=True):
|
||||
variant_amount=0.0, variant_seed=None,imgProcessorTask=True, job_info: JobInfo = None):
|
||||
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
|
||||
assert prompt is not None
|
||||
torch_gc()
|
||||
@ -816,7 +825,10 @@ def process_images(
|
||||
all_seeds = [seed + x for x in range(len(all_prompts))]
|
||||
|
||||
precision_scope = autocast if opt.precision == "autocast" else nullcontext
|
||||
output_images = []
|
||||
if job_info:
|
||||
output_images = job_info.images
|
||||
else:
|
||||
output_images = []
|
||||
grid_captions = []
|
||||
stats = []
|
||||
with torch.no_grad(), precision_scope("cuda"), (model.ema_scope() if not opt.optimized else nullcontext()):
|
||||
@ -837,11 +849,20 @@ def process_images(
|
||||
all_seeds[si] += target_seed_randomizer
|
||||
|
||||
for n in range(n_iter):
|
||||
if job_info and job_info.should_stop.is_set():
|
||||
print("Early exit requested")
|
||||
break
|
||||
|
||||
print(f"Iteration: {n+1}/{n_iter}")
|
||||
prompts = all_prompts[n * batch_size:(n + 1) * batch_size]
|
||||
captions = prompt_matrix_parts[n * batch_size:(n + 1) * batch_size]
|
||||
seeds = all_seeds[n * batch_size:(n + 1) * batch_size]
|
||||
|
||||
if job_info:
|
||||
job_info.job_status = f"Processing Iteration {n+1}/{n_iter}. Batch size {batch_size}"
|
||||
for idx,(p,s) in enumerate(zip(prompts,seeds)):
|
||||
job_info.job_status += f"\nItem {idx}: Seed {s}\nPrompt: {p}"
|
||||
|
||||
if opt.optimized:
|
||||
modelCS.to(device)
|
||||
uc = (model if not opt.optimized else modelCS).get_learned_conditioning(len(prompts) * [""])
|
||||
@ -1034,7 +1055,7 @@ Peak memory usage: { -(mem_max_used // -1_048_576) } MiB / { -(mem_total // -1_0
|
||||
|
||||
def txt2img(prompt: str, ddim_steps: int, sampler_name: str, toggles: List[int], realesrgan_model_name: str,
|
||||
ddim_eta: float, n_iter: int, batch_size: int, cfg_scale: float, seed: Union[int, str, None],
|
||||
height: int, width: int, fp, variant_amount: float = None, variant_seed: int = None):
|
||||
height: int, width: int, fp, variant_amount: float = None, variant_seed: int = None, job_info: JobInfo = None):
|
||||
outpath = opt.outdir_txt2img or opt.outdir or "outputs/txt2img-samples"
|
||||
err = False
|
||||
seed = seed_to_int(seed)
|
||||
@ -1105,6 +1126,7 @@ def txt2img(prompt: str, ddim_steps: int, sampler_name: str, toggles: List[int],
|
||||
jpg_sample=jpg_sample,
|
||||
variant_amount=variant_amount,
|
||||
variant_seed=variant_seed,
|
||||
job_info=job_info,
|
||||
)
|
||||
|
||||
del sampler
|
||||
@ -1162,9 +1184,9 @@ class Flagging(gr.FlaggingCallback):
|
||||
print("Logged:", filenames[0])
|
||||
|
||||
|
||||
def img2img(prompt: str, image_editor_mode: str, init_info, mask_mode: str, mask_blur_strength: int, ddim_steps: int, sampler_name: str,
|
||||
toggles: List[int], realesrgan_model_name: str, n_iter: int, cfg_scale: float, denoising_strength: float,
|
||||
seed: int, height: int, width: int, resize_mode: int, fp=None):
|
||||
def img2img(prompt: str, image_editor_mode: str, init_info: Dict[str,Image.Image], mask_mode: str, mask_blur_strength: int, ddim_steps: int, sampler_name: str,
|
||||
toggles: List[int], realesrgan_model_name: str, n_iter: int, cfg_scale: float, denoising_strength: float,
|
||||
seed: int, height: int, width: int, resize_mode: int, fp = None, job_info: JobInfo = None):
|
||||
outpath = opt.outdir_img2img or opt.outdir or "outputs/img2img-samples"
|
||||
err = False
|
||||
seed = seed_to_int(seed)
|
||||
@ -1349,6 +1371,7 @@ def img2img(prompt: str, image_editor_mode: str, init_info, mask_mode: str, mask
|
||||
write_info_files=write_info_files,
|
||||
write_sample_info_to_log_file=write_sample_info_to_log_file,
|
||||
jpg_sample=jpg_sample,
|
||||
job_info=job_info
|
||||
)
|
||||
|
||||
if initial_seed is None:
|
||||
@ -1405,6 +1428,7 @@ def img2img(prompt: str, image_editor_mode: str, init_info, mask_mode: str, mask
|
||||
write_info_files=write_info_files,
|
||||
write_sample_info_to_log_file=write_sample_info_to_log_file,
|
||||
jpg_sample=jpg_sample,
|
||||
job_info=job_info
|
||||
)
|
||||
|
||||
del sampler
|
||||
@ -1417,10 +1441,10 @@ prompt_parser = re.compile("""
|
||||
(?:\\\:|[^:])+ # match one or more non ':' characters or escaped colons '\:'
|
||||
) # end 'prompt'
|
||||
(?: # non-capture group
|
||||
:+ # match one or more ':' characters
|
||||
:+ # match one or more ':' characters
|
||||
(?P<weight> # capture group for 'weight'
|
||||
-?\d+(?:\.\d+)? # match positive or negative integer or decimal number
|
||||
)? # end weight capture group, make optional
|
||||
)? # end weight capture group, make optional
|
||||
\s* # strip spaces after weight
|
||||
| # OR
|
||||
$ # else, if no ':' then match end of line
|
||||
@ -1743,7 +1767,6 @@ def imgproc(image,image_batch,imgproc_prompt,imgproc_toggles, imgproc_upscale_to
|
||||
if 1 not in imgproc_toggles:
|
||||
output.append(image)
|
||||
save_sample(image, outpathDir, outFilename, False, None, None, None, None, None, None, None, None, None, None, None, None, None, False, None, None, None, None, None, None, None, None, None, True)
|
||||
|
||||
if 1 in imgproc_toggles:
|
||||
if imgproc_upscale_toggles == 0:
|
||||
|
||||
@ -2061,7 +2084,8 @@ demo = draw_gradio_ui(opt,
|
||||
GFPGAN=GFPGAN,
|
||||
LDSR=LDSR,
|
||||
run_GFPGAN=run_GFPGAN,
|
||||
run_RealESRGAN=run_RealESRGAN
|
||||
run_RealESRGAN=run_RealESRGAN,
|
||||
job_manager=job_manager
|
||||
)
|
||||
|
||||
class ServerLauncher(threading.Thread):
|
||||
@ -2080,7 +2104,7 @@ class ServerLauncher(threading.Thread):
|
||||
'share': opt.share
|
||||
}
|
||||
if not opt.share:
|
||||
demo.queue(concurrency_count=1)
|
||||
demo.queue(concurrency_count=opt.max_jobs)
|
||||
if opt.share and opt.share_password:
|
||||
gradio_params['auth'] = ('webui', opt.share_password)
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user