# stable-diffusion-webui/webui.py
# Standard library imports.
import os
import signal
import threading

# Project modules. NOTE(review): the modules.* import order below is kept
# exactly as in the original file, since several of these packages run
# setup code at import time.
from modules import devices
from modules.paths import script_path
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.ui
import modules.scripts
import modules.sd_hijack
import modules.codeformer_model
import modules.gfpgan_model
import modules.face_restoration
import modules.realesrgan_model as realesrgan
import modules.esrgan_model as esrgan
import modules.ldsr_model as ldsr
import modules.extras
import modules.lowvram
import modules.txt2img
import modules.img2img
import modules.swinir as swinir
import modules.sd_models

# Set up the face-restoration backends and register the restorer with the
# shared registry that the UI reads from.
modules.codeformer_model.setup_codeformer()
modules.gfpgan_model.setup_gfpgan()
shared.face_restorers.append(modules.face_restoration.FaceRestoration())

# Load the upscaler models from their configured paths.
esrgan.load_models(cmd_opts.esrgan_models_path)
swinir.load_models(cmd_opts.swinir_models_path)
realesrgan.setup_realesrgan()
ldsr.add_lsdr()

# Serializes GPU work: only one queued call runs at a time.
queue_lock = threading.Lock()
def wrap_queued_call(func):
    """Wrap *func* so that every call is serialized through queue_lock."""

    def wrapper(*args, **kwargs):
        # Hold the global GPU lock while the wrapped call executes.
        with queue_lock:
            return func(*args, **kwargs)

    return wrapper
def wrap_gradio_gpu_call(func):
    """Wrap a gradio handler that performs GPU work.

    Resets the shared progress/job state before the call, serializes the
    actual work through queue_lock, clears the job state afterwards, and
    finally passes the wrapper through modules.ui.wrap_gradio_call for
    gradio-level result/error handling.
    """

    def f(*args, **kwargs):
        devices.torch_gc()

        # Reset shared progress tracking for the new job.
        shared.state.sampling_step = 0
        shared.state.job_count = -1
        shared.state.job_no = 0
        shared.state.job_timestamp = shared.state.get_job_timestamp()
        shared.state.current_latent = None
        shared.state.current_image = None
        shared.state.current_image_sampling_step = 0
        shared.state.interrupted = False

        try:
            with queue_lock:
                res = func(*args, **kwargs)
        finally:
            # Fix: clear the job state even when func raises, so the UI does
            # not keep reporting a job as running after a failed generation.
            shared.state.job = ""
            shared.state.job_count = 0
            devices.torch_gc()

        return res

    return modules.ui.wrap_gradio_call(f)
# Discover and load user scripts from the scripts/ directory under the webui root.
modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
# Load the Stable Diffusion model into the shared module-level slot.
shared.sd_model = modules.sd_models.load_model()
# When the checkpoint setting changes, reload weights; the reload goes through
# wrap_queued_call so it cannot race an in-flight generation holding queue_lock.
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
def webui():
    """Build the gradio UI and launch the web server (blocks until exit)."""

    # Make the program exit immediately at ctrl+c without waiting for
    # gradio to shut down its worker threads.
    def sigint_handler(sig, frame):
        print(f'Interrupted with signal {sig} in {frame}')
        os._exit(0)

    signal.signal(signal.SIGINT, sigint_handler)

    demo = modules.ui.create_ui(
        txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img),
        img2img=wrap_gradio_gpu_call(modules.img2img.img2img),
        run_extras=wrap_gradio_gpu_call(modules.extras.run_extras),
        run_pnginfo=modules.extras.run_pnginfo,
        run_modelmerger=modules.extras.run_modelmerger
    )

    demo.launch(
        share=cmd_opts.share,
        server_name="0.0.0.0" if cmd_opts.listen else None,
        server_port=cmd_opts.port,
        debug=cmd_opts.gradio_debug,
        # --gradio-auth takes comma-separated "user:password" pairs.
        # Fix: split on the first ':' only, so passwords that themselves
        # contain a colon produce a valid (user, password) 2-tuple.
        auth=[tuple(cred.split(':', 1)) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
        inbrowser=cmd_opts.autolaunch,
    )
# Script entry point: start the web UI when executed directly.
if __name__ == "__main__":
    webui()