Mirror of https://github.com/Sygil-Dev/sygil-webui.git, synced 2024-12-15 06:21:34 +03:00
Img2img dev (#736)
* #715 #699 #698 #663 #625 #617 #611 #604 (#716)
* Update README.md
* Add sampler name to metadata (#695)
  Co-authored-by: EliEron <example@example.com>
* old-dev-merge
  Co-authored-by: EliEron <subanimehd@gmail.com>
  Co-authored-by: EliEron <example@example.com>
* img2img-fix (#717)
* Revert "img2img-fix (#717)"
  This reverts commit 70d4b1ca2a.
* img2img fixes
* Revert "img2img fixes"
  This reverts commit e66eddc621.
* Revert "Revert "img2img-fix (#717)""
  This reverts commit bf08b617d4.
* img2img fixed
* feat: bring back Crop mode, formatting

Co-authored-by: EliEron <subanimehd@gmail.com>
Co-authored-by: EliEron <example@example.com>
Co-authored-by: Thomas Mello <work.mello@gmail.com>
parent 2b1ac8daf7
commit 490bbbc103
@@ -4,15 +4,16 @@ txt2img:
     prompt:
     ddim_steps: 50
     # Adding an int to toggles enables the corresponding feature.
-    # 0: Create prompt matrix (separate multiple prompts using |, and get all combinations of them)
-    # 1: Normalize Prompt Weights (ensure sum of weights add up to 1.0)
-    # 2: Save individual images
-    # 3: Save grid
-    # 4: Sort samples by prompt
-    # 5: Write sample info files
-    # 6: jpg samples
-    # 7: Fix faces using GFPGAN
-    # 8: Upscale images using Real-ESRGAN
+    # 0: Create prompt matrix (separate multiple prompts using |, and get all combinations of them)
+    # 1: Normalize Prompt Weights (ensure sum of weights add up to 1.0)
+    # 2: Save individual images
+    # 3: Save grid
+    # 4: Sort samples by prompt
+    # 5: Write sample info files
+    # 6: write sample info to log file
+    # 7: jpg samples
+    # 8: Fix faces using GFPGAN
+    # 9: Upscale images using RealESRGAN
     toggles: [1, 2, 3, 4, 5]
     sampler_name: k_lms
     ddim_eta: 0.0 # legacy name, applies to all algorithms.
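For context, a minimal sketch of how a toggle list such as toggles: [1, 2, 3, 4, 5] might be consumed on the Python side. The index-to-feature mapping follows the new comments above; the variable names are illustrative, not the webui's actual ones.

# Illustrative only: each int present in the toggles list switches on the matching feature.
toggles = [1, 2, 3, 4, 5]

normalize_prompt_weights = 1 in toggles   # 1: Normalize Prompt Weights
save_individual_images = 2 in toggles     # 2: Save individual images
save_grid = 3 in toggles                  # 3: Save grid
sort_samples_by_prompt = 4 in toggles     # 4: Sort samples by prompt
write_info_files = 5 in toggles           # 5: Write sample info files
write_info_to_log = 6 in toggles          # 6: write sample info to log file
jpg_samples = 7 in toggles                # 7: jpg samples
use_gfpgan = 8 in toggles                 # 8: Fix faces using GFPGAN
use_realesrgan = 9 in toggles             # 9: Upscale images using RealESRGAN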
@@ -38,4 +38,4 @@ dependencies:
     - -e git+https://github.com/TencentARC/GFPGAN#egg=GFPGAN
     - -e git+https://github.com/xinntao/Real-ESRGAN#egg=realesrgan
     - -e git+https://github.com/hlky/k-diffusion-sd#egg=k_diffusion
-    - -e .
+    - -e .
frontend/css/custom.css (new file, 209 lines)
@@ -0,0 +1,209 @@
|
||||
/* ----------------------------------------------
|
||||
* Generated by Animista on 2022-9-3 12:0:51
|
||||
* Licensed under FreeBSD License.
|
||||
* See http://animista.net/license for more info.
|
||||
* w: http://animista.net, t: @cssanimista
|
||||
* ---------------------------------------------- */
|
||||
|
||||
/**
|
||||
* ----------------------------------------
|
||||
* animation fade-in
|
||||
* ----------------------------------------
|
||||
*/
|
||||
@-webkit-keyframes fade-in {
|
||||
0% {
|
||||
opacity: 0;
|
||||
}
|
||||
100% {
|
||||
opacity: 1;
|
||||
}
|
||||
}
|
||||
@keyframes fade-in {
|
||||
0% {
|
||||
opacity: 0;
|
||||
}
|
||||
100% {
|
||||
opacity: 1;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* CSS HEX */
|
||||
:root {
|
||||
--eerie-black: #141414ff;
|
||||
--jet: #373737ff;
|
||||
--white: #ffffffff;
|
||||
--rajah: #faa549ff;
|
||||
--tart-orange: #9c85fb;
|
||||
--light-steel-blue: #b7d0f1ff;
|
||||
}
|
||||
/* Gallery items (not working) */
|
||||
.gallery-item.svelte-1g9btlg.svelte-1g9btlg{
|
||||
border: none!important;
|
||||
}
|
||||
/* Loading background */
|
||||
.dark .wrap.svelte-2fvq7v{
|
||||
background-color: #373737ff;
|
||||
}
|
||||
|
||||
/* generate button */
|
||||
#generate, #img2img_mask_btn, #img2img_edit_btn{
|
||||
transition: 0.3s;
|
||||
color: #9c85fb!important;
|
||||
border-color: #9c85fb!important;
|
||||
}
|
||||
|
||||
#generate:hover, #img2img_mask_btn:hover, #img2img_edit_btn:hover{
|
||||
color: #141414ff!important;
|
||||
background-color: #9c85fb!important;
|
||||
}
|
||||
|
||||
|
||||
/* Generation parameters */
|
||||
#highlight > div.textfield.bg-white.dark\:bg-transparent.rounded-sm.text-sm.box-border.max-w-full.break-word.leading-7.mt-7 > span:nth-child(2){
|
||||
background: none!important;
|
||||
color: white!important;
|
||||
}
|
||||
#highlight > div.textfield.bg-white.dark\:bg-transparent.rounded-sm.text-sm.box-border.max-w-full.break-word.leading-7.mt-7 > span:nth-child(4){
|
||||
background: none!important;
|
||||
color: white!important;
|
||||
}
|
||||
#highlight > div.textfield.bg-white.dark\:bg-transparent.rounded-sm.text-sm.box-border.max-w-full.break-word.leading-7.mt-7 > span:nth-child(6){
|
||||
background: none!important;
|
||||
color: white!important;
|
||||
}
|
||||
#highlight > div.textfield.bg-white.dark\:bg-transparent.rounded-sm.text-sm.box-border.max-w-full.break-word.leading-7.mt-7 > span:nth-child(8){
|
||||
background: none!important;
|
||||
color: white!important;
|
||||
}
|
||||
#highlight > div.textfield.bg-white.dark\:bg-transparent.rounded-sm.text-sm.box-border.max-w-full.break-word.leading-7.mt-7 > span:nth-child(10){
|
||||
background: none!important;
|
||||
color: white!important;
|
||||
}
|
||||
#highlight > div.textfield.bg-white.dark\:bg-transparent.rounded-sm.text-sm.box-border.max-w-full.break-word.leading-7.mt-7 > span:nth-child(12){
|
||||
background: none!important;
|
||||
color: white!important;
|
||||
}
|
||||
|
||||
/* Mask background */
|
||||
.dark .bg-gray-200{
|
||||
background-color:rgba(55, 55, 55, 0.105)!important;
|
||||
}
|
||||
.cropper-wrap-box, .cropper-canvas{
|
||||
background-color:rgba(55, 55, 55, 0.105)!important;
|
||||
}
|
||||
.cropper-bg {
|
||||
background: none!important;
|
||||
}
|
||||
|
||||
select {
|
||||
background:#000;
|
||||
color:#fff;
|
||||
}
|
||||
|
||||
select * {
|
||||
background:#373737ff;
|
||||
color:#9c85fb;
|
||||
}
|
||||
|
||||
/* General Background */
|
||||
.gradio-container {background:#141414ff;}
|
||||
|
||||
/*General Text color on hover */
|
||||
.dark .hover\:text-gray-700:hover{
|
||||
color: #9d85fb8a!important;
|
||||
}
|
||||
/*General Text color */
|
||||
.text-gray-400{
|
||||
color:rgba(255, 255, 255, 0.504);
|
||||
transition: 0.3s;
|
||||
}
|
||||
|
||||
/* General container of everything */
|
||||
.dark .dark\:bg-gray-950 {background-color: #141414ff!important;
|
||||
-webkit-animation: fade-in 1s ease-in both;
|
||||
animation: fade-in 1s ease-in both;
|
||||
}
|
||||
/* labels in frames of gallery */
|
||||
.dark .dark\:bg-gray-900 {
|
||||
background-color:#9d85fbdf!important;
|
||||
border: none!important;}
|
||||
/* Background for Gradio stuff along with colors for text */
|
||||
.dark .gr-box {
|
||||
|
||||
|
||||
background-color:rgba(55, 55, 55, 0.105)!important;
|
||||
border: solid 0.5px!important;
|
||||
border-color: rgba(55, 55, 55, 0.055)!important;
|
||||
/* sampler selector color */
|
||||
color: #9c85fb!important;}
|
||||
|
||||
/* Secondary Buttons color */
|
||||
.dark .gr-button-secondary{
|
||||
color: #9c85fb;
|
||||
border-color: #9d85fb5c;
|
||||
transition: 0.3s;}
|
||||
|
||||
.dark .gr-button-secondary:hover{
|
||||
color: #141414ff!important;
|
||||
background-color: #9c85fb!important;}
|
||||
|
||||
.dark .gr-button-primary{
|
||||
color: #9c85fb;
|
||||
border-color: #9d85fb5c;
|
||||
transition: 0.3s;}
|
||||
|
||||
.dark .gr-button-primary:hover{
|
||||
color: #141414ff!important;
|
||||
background-color: #9c85fb!important;}
|
||||
|
||||
/* image lab process button */
|
||||
div[id*="111"]{
|
||||
width: 50% !important;
|
||||
align-self: center !important;
|
||||
}
|
||||
/* Selected tabs color */
|
||||
button, input, optgroup, select, textarea {color: #9c85fb !important;}
|
||||
|
||||
/* -or- text color wtf */
|
||||
.text-gray-300{
|
||||
color:rgba(255, 255, 255, 0.504);
|
||||
}
|
||||
|
||||
/* Sliders colors */
|
||||
button, input, optgroup, select, textarea{
|
||||
accent-color: #9c85fb!important;
|
||||
}
|
||||
/* Text color for placeholder in prompt */
|
||||
input.scroll-hide.block.gr-box.gr-input.w-full.gr-text-input::placeholder{
|
||||
color: #9d85fb5c;
|
||||
transition: 0.3s;
|
||||
}
|
||||
/* disabling borders for stuff */
|
||||
.border-gray-200{
|
||||
/* no border */
|
||||
border: none;
|
||||
}
|
||||
|
||||
.border-b-2{
|
||||
/* no border */
|
||||
border: none;
|
||||
}
|
||||
/* disabling backgrounds for labels and buttons */
|
||||
button, select, textarea {
|
||||
background: none!important;
|
||||
}
|
||||
/* radio selection border and background */
|
||||
.dark .gr-input-label{
|
||||
background: none!important;
|
||||
border: none!important;
|
||||
}
|
||||
/* checkbox and radio buttons color when checked */
|
||||
.dark .gr-check-radio:checked{
|
||||
background-color: #9c85fb!important;
|
||||
}
|
||||
/* checkbox and radio buttons color when unchecked */
|
||||
.dark .gr-check-radio{
|
||||
background-color: #373737ff!important;
|
||||
}
|
||||
|
@@ -4,12 +4,14 @@ from frontend.job_manager import JobManager
|
||||
import frontend.ui_functions as uifn
|
||||
import uuid
|
||||
|
||||
def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda x: x, txt2img_defaults={}, RealESRGAN=True, GFPGAN=True,LDSR=True,
|
||||
|
||||
def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x, imgproc=lambda x: x, txt2img_defaults={},
|
||||
RealESRGAN=True, GFPGAN=True, LDSR=True,
|
||||
txt2img_toggles={}, txt2img_toggle_defaults='k_euler', show_embeddings=False, img2img_defaults={},
|
||||
img2img_toggles={}, img2img_toggle_defaults={}, sample_img2img=None, img2img_mask_modes=None,
|
||||
img2img_resize_modes=None, imgproc_defaults={},imgproc_mode_toggles={},user_defaults={}, run_GFPGAN=lambda x: x, run_RealESRGAN=lambda x: x,
|
||||
img2img_resize_modes=None, imgproc_defaults={}, imgproc_mode_toggles={}, user_defaults={},
|
||||
run_GFPGAN=lambda x: x, run_RealESRGAN=lambda x: x,
|
||||
job_manager: JobManager = None) -> gr.Blocks:
|
||||
|
||||
with gr.Blocks(css=css(opt), analytics_enabled=False, title="Stable Diffusion WebUI") as demo:
|
||||
with gr.Tabs(elem_id='tabss') as tabs:
|
||||
with gr.TabItem("Text-to-Image", id='txt2img_tab'):
|
||||
@@ -40,34 +42,37 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
|
||||
txt2img_job_ui = job_manager.draw_gradio_ui() if job_manager else None
|
||||
|
||||
txt2img_dimensions_info_text_box = gr.Textbox(label="Aspect ratio (4:3 = 1.333 | 16:9 = 1.777 | 21:9 = 2.333)")
|
||||
txt2img_dimensions_info_text_box = gr.Textbox(
|
||||
label="Aspect ratio (4:3 = 1.333 | 16:9 = 1.777 | 21:9 = 2.333)")
|
||||
with gr.Column():
|
||||
with gr.Box():
|
||||
output_txt2img_gallery = gr.Gallery(label="Images", elem_id="txt2img_gallery_output").style(grid=[4, 4])
|
||||
gr.Markdown("Select an image from the gallery, then click one of the buttons below to perform an action.")
|
||||
output_txt2img_gallery = gr.Gallery(label="Images", elem_id="txt2img_gallery_output").style(
|
||||
grid=[4, 4])
|
||||
gr.Markdown(
|
||||
"Select an image from the gallery, then click one of the buttons below to perform an action.")
|
||||
with gr.Row(elem_id='txt2img_actions_row'):
|
||||
gr.Button("Copy to clipboard").click(fn=None,
|
||||
inputs=output_txt2img_gallery,
|
||||
outputs=[],
|
||||
#_js=js_copy_to_clipboard( 'txt2img_gallery_output')
|
||||
)
|
||||
inputs=output_txt2img_gallery,
|
||||
outputs=[],
|
||||
# _js=js_copy_to_clipboard( 'txt2img_gallery_output')
|
||||
)
|
||||
output_txt2img_copy_to_input_btn = gr.Button("Push to img2img")
|
||||
output_txt2img_to_imglab = gr.Button("Send to Lab",visible=True)
|
||||
output_txt2img_to_imglab = gr.Button("Send to Lab", visible=True)
|
||||
|
||||
output_txt2img_params = gr.Highlightedtext(label="Generation parameters", interactive=False, elem_id='highlight')
|
||||
output_txt2img_params = gr.Highlightedtext(label="Generation parameters", interactive=False,
|
||||
elem_id='highlight')
|
||||
with gr.Group():
|
||||
with gr.Row(elem_id='txt2img_output_row'):
|
||||
output_txt2img_copy_params = gr.Button("Copy full parameters").click(
|
||||
inputs=[output_txt2img_params], outputs=[],
|
||||
_js=js_copy_txt2img_output,
|
||||
fn=None, show_progress=False)
|
||||
fn=None, show_progress=False)
|
||||
output_txt2img_seed = gr.Number(label='Seed', interactive=False, visible=False)
|
||||
output_txt2img_copy_seed = gr.Button("Copy only seed").click(
|
||||
inputs=[output_txt2img_seed], outputs=[],
|
||||
_js='(x) => navigator.clipboard.writeText(x)', fn=None, show_progress=False)
|
||||
output_txt2img_stats = gr.HTML(label='Stats')
|
||||
with gr.Column():
|
||||
|
||||
txt2img_steps = gr.Slider(minimum=1, maximum=250, step=1, label="Sampling Steps",
|
||||
value=txt2img_defaults['ddim_steps'])
|
||||
txt2img_sampling = gr.Dropdown(label='Sampling method (k_lms is default k-diffusion sampler)',
|
||||
@@ -93,7 +98,7 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
choices=['RealESRGAN_x4plus',
|
||||
'RealESRGAN_x4plus_anime_6B'],
|
||||
value='RealESRGAN_x4plus',
|
||||
visible=False)#RealESRGAN is not None # invisible until removed) # TODO: Feels like I shouldnt slot it in here.
|
||||
visible=False) # RealESRGAN is not None # invisible until removed) # TODO: Feels like I shouldnt slot it in here.
|
||||
txt2img_ddim_eta = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="DDIM ETA",
|
||||
value=txt2img_defaults['ddim_eta'], visible=False)
|
||||
txt2img_variant_amount = gr.Slider(minimum=0.0, maximum=1.0, label='Variation Amount',
|
||||
@@ -133,7 +138,8 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
# txt2img_width.change(fn=uifn.update_dimensions_info, inputs=[txt2img_width, txt2img_height], outputs=txt2img_dimensions_info_text_box)
|
||||
# txt2img_height.change(fn=uifn.update_dimensions_info, inputs=[txt2img_width, txt2img_height], outputs=txt2img_dimensions_info_text_box)
|
||||
|
||||
live_prompt_params = [txt2img_prompt, txt2img_width, txt2img_height, txt2img_steps, txt2img_seed, txt2img_batch_count, txt2img_cfg]
|
||||
live_prompt_params = [txt2img_prompt, txt2img_width, txt2img_height, txt2img_steps, txt2img_seed,
|
||||
txt2img_batch_count, txt2img_cfg]
|
||||
txt2img_prompt.change(
|
||||
fn=None,
|
||||
inputs=live_prompt_params,
|
||||
@@ -141,7 +147,6 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
_js=js_parse_prompt
|
||||
)
|
||||
|
||||
|
||||
with gr.TabItem("Image-to-Image Unified", id="img2img_tab"):
|
||||
with gr.Row(elem_id="prompt_row"):
|
||||
img2img_prompt = gr.Textbox(label="Prompt",
|
||||
@@ -158,39 +163,58 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
with gr.Row().style(equal_height=False):
|
||||
with gr.Column():
|
||||
gr.Markdown('#### Img2Img Input')
|
||||
img2img_image_editor = gr.Image(value=sample_img2img, source="upload", interactive=True,
|
||||
type="pil", tool="select", elem_id="img2img_editor",
|
||||
image_mode="RGBA")
|
||||
img2img_image_mask = gr.Image(value=sample_img2img, source="upload", interactive=True,
|
||||
type="pil", tool="sketch", visible=False,
|
||||
elem_id="img2img_mask")
|
||||
img2img_image_mask = gr.Image(
|
||||
value=sample_img2img,
|
||||
source="upload",
|
||||
interactive=True,
|
||||
type="pil", tool="sketch",
|
||||
elem_id="img2img_mask",
|
||||
image_mode="RGBA"
|
||||
)
|
||||
img2img_image_editor = gr.Image(
|
||||
value=sample_img2img,
|
||||
source="upload",
|
||||
interactive=True,
|
||||
type="pil",
|
||||
tool="select",
|
||||
visible=False,
|
||||
image_mode="RGBA",
|
||||
elem_id="img2img_editor"
|
||||
)
|
||||
|
||||
with gr.Tabs():
|
||||
with gr.TabItem("Editor Options"):
|
||||
with gr.Row():
|
||||
img2img_image_editor_mode = gr.Radio(choices=["Mask", "Crop", "Uncrop"], label="Image Editor Mode",
|
||||
value="Crop", elem_id='edit_mode_select')
|
||||
# disable Uncrop for now
|
||||
# choices=["Mask", "Crop", "Uncrop"]
|
||||
img2img_image_editor_mode = gr.Radio(choices=["Mask", "Crop"],
|
||||
label="Image Editor Mode",
|
||||
value="Mask", elem_id='edit_mode_select',
|
||||
visible=True)
|
||||
img2img_mask = gr.Radio(choices=["Keep masked area", "Regenerate only masked area"],
|
||||
label="Mask Mode", type="index",
|
||||
value=img2img_mask_modes[img2img_defaults['mask_mode']], visible=False)
|
||||
label="Mask Mode", type="index",
|
||||
value=img2img_mask_modes[img2img_defaults['mask_mode']],
|
||||
visible=True)
|
||||
|
||||
img2img_mask_blur_strength = gr.Slider(minimum=1, maximum=10, step=1,
|
||||
label="How much blurry should the mask be? (to avoid hard edges)",
|
||||
value=3, visible=False)
|
||||
label="How much blurry should the mask be? (to avoid hard edges)",
|
||||
value=3, visible=False)
|
||||
|
||||
img2img_resize = gr.Radio(label="Resize mode",
|
||||
choices=["Just resize"],
|
||||
type="index",
|
||||
value=img2img_resize_modes[img2img_defaults['resize_mode']])
|
||||
choices=["Just resize", "Crop and resize",
|
||||
"Resize and fill"],
|
||||
type="index",
|
||||
value=img2img_resize_modes[
|
||||
img2img_defaults['resize_mode']], visible=False)
|
||||
|
||||
img2img_painterro_btn = gr.Button("Advanced Editor")
|
||||
with gr.TabItem("Hints"):
|
||||
img2img_help = gr.Markdown(visible=False, value=uifn.help_text)
|
||||
|
||||
|
||||
with gr.Column():
|
||||
gr.Markdown('#### Img2Img Results')
|
||||
output_img2img_gallery = gr.Gallery(label="Images", elem_id="img2img_gallery_output").style(grid=[4,4,4])
|
||||
output_img2img_gallery = gr.Gallery(label="Images", elem_id="img2img_gallery_output").style(
|
||||
grid=[4, 4, 4])
|
||||
img2img_job_ui = job_manager.draw_gradio_ui() if job_manager else None
|
||||
with gr.Tabs():
|
||||
with gr.TabItem("Generated image actions", id="img2img_actions_tab"):
|
||||
@@ -206,7 +230,8 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
with gr.Row():
|
||||
output_img2img_copy_params = gr.Button("Copy full parameters").click(
|
||||
inputs=output_img2img_params, outputs=[],
|
||||
_js='(x) => {navigator.clipboard.writeText(x.replace(": ",":"))}', fn=None, show_progress=False)
|
||||
_js='(x) => {navigator.clipboard.writeText(x.replace(": ",":"))}', fn=None,
|
||||
show_progress=False)
|
||||
output_img2img_seed = gr.Number(label='Seed', interactive=False, visible=False)
|
||||
output_img2img_copy_seed = gr.Button("Copy only seed").click(
|
||||
inputs=output_img2img_seed, outputs=[],
|
||||
@@ -229,7 +254,8 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
img2img_batch_count = gr.Slider(minimum=1, maximum=50, step=1,
|
||||
label='Batch count (how many batches of images to generate)',
|
||||
value=img2img_defaults['n_iter'])
|
||||
img2img_dimensions_info_text_box = gr.Textbox(label="Aspect ratio (4:3 = 1.333 | 16:9 = 1.777 | 21:9 = 2.333)")
|
||||
img2img_dimensions_info_text_box = gr.Textbox(
|
||||
label="Aspect ratio (4:3 = 1.333 | 16:9 = 1.777 | 21:9 = 2.333)")
|
||||
with gr.Column():
|
||||
img2img_steps = gr.Slider(minimum=1, maximum=250, step=1, label="Sampling Steps",
|
||||
value=img2img_defaults['ddim_steps'])
|
||||
@@ -256,16 +282,22 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
|
||||
img2img_image_editor_mode.change(
|
||||
uifn.change_image_editor_mode,
|
||||
[img2img_image_editor_mode, img2img_image_editor, img2img_resize, img2img_width, img2img_height],
|
||||
[img2img_image_editor_mode,
|
||||
img2img_image_editor,
|
||||
img2img_image_mask,
|
||||
img2img_resize,
|
||||
img2img_width,
|
||||
img2img_height
|
||||
],
|
||||
[img2img_image_editor, img2img_image_mask, img2img_btn_editor, img2img_btn_mask,
|
||||
img2img_painterro_btn, img2img_mask, img2img_mask_blur_strength]
|
||||
)
|
||||
|
||||
img2img_image_editor.edit(
|
||||
uifn.update_image_mask,
|
||||
[img2img_image_editor, img2img_resize, img2img_width, img2img_height],
|
||||
img2img_image_mask
|
||||
)
|
||||
# img2img_image_editor_mode.change(
|
||||
# uifn.update_image_mask,
|
||||
# [img2img_image_editor, img2img_resize, img2img_width, img2img_height],
|
||||
# img2img_image_mask
|
||||
# )
|
||||
|
||||
output_txt2img_copy_to_input_btn.click(
|
||||
uifn.copy_img_to_input,
|
||||
@@ -299,12 +331,13 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
)
|
||||
|
||||
img2img_func = img2img
|
||||
img2img_inputs = [img2img_prompt, img2img_image_editor_mode, img2img_image_editor, img2img_image_mask, img2img_mask,
|
||||
img2img_inputs = [img2img_prompt, img2img_image_editor_mode, img2img_mask,
|
||||
img2img_mask_blur_strength, img2img_steps, img2img_sampling, img2img_toggles,
|
||||
img2img_realesrgan_model_name, img2img_batch_count, img2img_cfg,
|
||||
img2img_denoising, img2img_seed, img2img_height, img2img_width, img2img_resize,
|
||||
img2img_embeddings]
|
||||
img2img_outputs = [output_img2img_gallery, output_img2img_seed, output_img2img_params, output_img2img_stats]
|
||||
img2img_image_editor, img2img_image_mask, img2img_embeddings]
|
||||
img2img_outputs = [output_img2img_gallery, output_img2img_seed, output_img2img_params,
|
||||
output_img2img_stats]
|
||||
|
||||
# If a JobManager was passed in then wrap the Generate functions
|
||||
if img2img_job_ui:
|
||||
@@ -319,10 +352,16 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
img2img_inputs,
|
||||
img2img_outputs
|
||||
)
|
||||
|
||||
def img2img_submit_params():
|
||||
# print([img2img_prompt, img2img_image_editor_mode, img2img_mask,
|
||||
# img2img_mask_blur_strength, img2img_steps, img2img_sampling, img2img_toggles,
|
||||
# img2img_realesrgan_model_name, img2img_batch_count, img2img_cfg,
|
||||
# img2img_denoising, img2img_seed, img2img_height, img2img_width, img2img_resize,
|
||||
# img2img_image_editor, img2img_image_mask, img2img_embeddings])
|
||||
return (img2img_func,
|
||||
img2img_inputs,
|
||||
img2img_outputs)
|
||||
img2img_inputs,
|
||||
img2img_outputs)
|
||||
|
||||
img2img_btn_editor.click(*img2img_submit_params())
|
||||
|
||||
@@ -337,26 +376,31 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
_js=call_JS("Painterro.init", toId="img2img_editor")
|
||||
)
|
||||
|
||||
img2img_width.change(fn=uifn.update_dimensions_info, inputs=[img2img_width, img2img_height], outputs=img2img_dimensions_info_text_box)
|
||||
img2img_height.change(fn=uifn.update_dimensions_info, inputs=[img2img_width, img2img_height], outputs=img2img_dimensions_info_text_box)
|
||||
|
||||
with gr.TabItem("Image Lab", id='imgproc_tab'):
|
||||
gr.Markdown("Post-process results")
|
||||
with gr.Row():
|
||||
with gr.Column():
|
||||
with gr.Tabs():
|
||||
with gr.TabItem('Single Image'):
|
||||
imgproc_source = gr.Image(label="Source", source="upload", interactive=True, type="pil",elem_id="imglab_input")
|
||||
img2img_width.change(fn=uifn.update_dimensions_info, inputs=[img2img_width, img2img_height],
|
||||
outputs=img2img_dimensions_info_text_box)
|
||||
img2img_height.change(fn=uifn.update_dimensions_info, inputs=[img2img_width, img2img_height],
|
||||
outputs=img2img_dimensions_info_text_box)
|
||||
|
||||
#gfpgan_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Effect strength",
|
||||
with gr.TabItem("Image Lab", id='imgproc_tab'):
|
||||
gr.Markdown("Post-process results")
|
||||
with gr.Row():
|
||||
with gr.Column():
|
||||
with gr.Tabs():
|
||||
with gr.TabItem('Single Image'):
|
||||
imgproc_source = gr.Image(label="Source", source="upload", interactive=True, type="pil",
|
||||
elem_id="imglab_input")
|
||||
|
||||
# gfpgan_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Effect strength",
|
||||
# value=gfpgan_defaults['strength'])
|
||||
#select folder with images to process
|
||||
with gr.TabItem('Batch Process'):
|
||||
imgproc_folder = gr.File(label="Batch Process", file_count="multiple",source="upload", interactive=True, type="file")
|
||||
imgproc_pngnfo = gr.Textbox(label="PNG Metadata", placeholder="PngNfo", visible=False, max_lines=5)
|
||||
with gr.Row():
|
||||
imgproc_btn = gr.Button("Process", variant="primary")
|
||||
gr.HTML("""
|
||||
# select folder with images to process
|
||||
with gr.TabItem('Batch Process'):
|
||||
imgproc_folder = gr.File(label="Batch Process", file_count="multiple", source="upload",
|
||||
interactive=True, type="file")
|
||||
imgproc_pngnfo = gr.Textbox(label="PNG Metadata", placeholder="PngNfo", visible=False,
|
||||
max_lines=5)
|
||||
with gr.Row():
|
||||
imgproc_btn = gr.Button("Process", variant="primary")
|
||||
gr.HTML("""
|
||||
<div id="90" style="max-width: 100%; font-size: 14px; text-align: center;" class="output-markdown gr-prose border-solid border border-gray-200 rounded gr-panel">
|
||||
<p><b>Upscale Modes Guide</b></p>
|
||||
<p></p>
|
||||
@@ -370,142 +414,170 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x,imgproc=lambda
|
||||
<p>An 8X upscaler with high VRAM usage. It uses GoBig to add details and then a Latent Diffusion model to upscale the image, which results in less artifacting/sharpening; use the settings to feed GoBig parameters that will contribute to the result. This mode is considerably slower.</p>
|
||||
</div>
|
||||
""")
|
||||
with gr.Column():
|
||||
with gr.Tabs():
|
||||
with gr.TabItem('Output'):
|
||||
imgproc_output = gr.Gallery(label="Output", elem_id="imgproc_gallery_output")
|
||||
with gr.Row(elem_id="proc_options_row"):
|
||||
with gr.Box():
|
||||
with gr.Column():
|
||||
gr.Markdown("<b>Processor Selection</b>")
|
||||
imgproc_toggles = gr.CheckboxGroup(label = '',choices=imgproc_mode_toggles, type="index")
|
||||
#.change toggles to show options
|
||||
#imgproc_toggles.change()
|
||||
with gr.Box(visible=False) as gfpgan_group:
|
||||
with gr.Column():
|
||||
with gr.Tabs():
|
||||
with gr.TabItem('Output'):
|
||||
imgproc_output = gr.Gallery(label="Output", elem_id="imgproc_gallery_output")
|
||||
with gr.Row(elem_id="proc_options_row"):
|
||||
with gr.Box():
|
||||
with gr.Column():
|
||||
gr.Markdown("<b>Processor Selection</b>")
|
||||
imgproc_toggles = gr.CheckboxGroup(label='', choices=imgproc_mode_toggles,
|
||||
type="index")
|
||||
# .change toggles to show options
|
||||
# imgproc_toggles.change()
|
||||
with gr.Box(visible=False) as gfpgan_group:
|
||||
|
||||
gfpgan_defaults = {
|
||||
'strength': 100,
|
||||
}
|
||||
gfpgan_defaults = {
|
||||
'strength': 100,
|
||||
}
|
||||
|
||||
if 'gfpgan' in user_defaults:
|
||||
gfpgan_defaults.update(user_defaults['gfpgan'])
|
||||
if GFPGAN is None:
|
||||
gr.HTML("""
|
||||
if 'gfpgan' in user_defaults:
|
||||
gfpgan_defaults.update(user_defaults['gfpgan'])
|
||||
if GFPGAN is None:
|
||||
gr.HTML("""
|
||||
<div id="90" style="max-width: 100%; font-size: 14px; text-align: center;" class="output-markdown gr-prose border-solid border border-gray-200 rounded gr-panel">
|
||||
<p><b> Please download GFPGAN to activate face fixing features</b>, instructions are available at the <a href='https://github.com/hlky/stable-diffusion-webui'>Github</a></p>
|
||||
</div>
|
||||
""")
|
||||
#gr.Markdown("")
|
||||
#gr.Markdown("<b> Please download GFPGAN to activate face fixing features</b>, instructions are available at the <a href='https://github.com/hlky/stable-diffusion-webui'>Github</a>")
|
||||
with gr.Column():
|
||||
gr.Markdown("<b>GFPGAN Settings</b>")
|
||||
imgproc_gfpgan_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Effect strength",
|
||||
value=gfpgan_defaults['strength'],visible=GFPGAN is not None)
|
||||
with gr.Box(visible=False) as upscale_group:
|
||||
# gr.Markdown("")
|
||||
# gr.Markdown("<b> Please download GFPGAN to activate face fixing features</b>, instructions are available at the <a href='https://github.com/hlky/stable-diffusion-webui'>Github</a>")
|
||||
with gr.Column():
|
||||
gr.Markdown("<b>GFPGAN Settings</b>")
|
||||
imgproc_gfpgan_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.001,
|
||||
label="Effect strength",
|
||||
value=gfpgan_defaults['strength'],
|
||||
visible=GFPGAN is not None)
|
||||
with gr.Box(visible=False) as upscale_group:
|
||||
|
||||
if LDSR:
|
||||
upscaleModes = ['RealESRGAN','GoBig','Latent Diffusion SR','GoLatent ']
|
||||
else:
|
||||
gr.HTML("""
|
||||
if LDSR:
|
||||
upscaleModes = ['RealESRGAN', 'GoBig', 'Latent Diffusion SR', 'GoLatent ']
|
||||
else:
|
||||
gr.HTML("""
|
||||
<div id="90" style="max-width: 100%; font-size: 14px; text-align: center;" class="output-markdown gr-prose border-solid border border-gray-200 rounded gr-panel">
|
||||
<p><b> Please download LDSR to activate more upscale features</b>, instructions are available at the <a href='https://github.com/hlky/stable-diffusion-webui'>Github</a></p>
|
||||
</div>
|
||||
""")
|
||||
upscaleModes = ['RealESRGAN','GoBig']
|
||||
upscaleModes = ['RealESRGAN', 'GoBig']
|
||||
with gr.Column():
|
||||
gr.Markdown("<b>Upscaler Selection</b>")
|
||||
imgproc_upscale_toggles = gr.Radio(label='', choices=upscaleModes, type="index",
|
||||
visible=RealESRGAN is not None, value='RealESRGAN')
|
||||
with gr.Box(visible=False) as upscalerSettings_group:
|
||||
|
||||
with gr.Box(visible=True) as realesrgan_group:
|
||||
with gr.Column():
|
||||
gr.Markdown("<b>Upscaler Selection</b>")
|
||||
imgproc_upscale_toggles = gr.Radio(label = '',choices=upscaleModes, type="index",visible=RealESRGAN is not None,value='RealESRGAN')
|
||||
with gr.Box(visible=False) as upscalerSettings_group:
|
||||
|
||||
with gr.Box(visible=True) as realesrgan_group:
|
||||
gr.Markdown("<b>RealESRGAN Settings</b>")
|
||||
imgproc_realesrgan_model_name = gr.Dropdown(label='RealESRGAN model',
|
||||
interactive=RealESRGAN is not None,
|
||||
choices=['RealESRGAN_x4plus',
|
||||
'RealESRGAN_x4plus_anime_6B',
|
||||
'RealESRGAN_x2plus',
|
||||
'RealESRGAN_x2plus_anime_6B'],
|
||||
value='RealESRGAN_x4plus',
|
||||
visible=RealESRGAN is not None) # TODO: Feels like I shouldnt slot it in here.
|
||||
with gr.Box(visible=False) as ldsr_group:
|
||||
with gr.Row(elem_id="ldsr_settings_row"):
|
||||
with gr.Column():
|
||||
gr.Markdown("<b>RealESRGAN Settings</b>")
|
||||
imgproc_realesrgan_model_name = gr.Dropdown(label='RealESRGAN model', interactive=RealESRGAN is not None,
|
||||
choices= ['RealESRGAN_x4plus',
|
||||
'RealESRGAN_x4plus_anime_6B','RealESRGAN_x2plus',
|
||||
'RealESRGAN_x2plus_anime_6B'],
|
||||
value='RealESRGAN_x4plus',
|
||||
visible=RealESRGAN is not None) # TODO: Feels like I shouldnt slot it in here.
|
||||
with gr.Box(visible=False) as ldsr_group:
|
||||
with gr.Row(elem_id="ldsr_settings_row"):
|
||||
with gr.Column():
|
||||
gr.Markdown("<b>Latent Diffusion Super Sampling Settings</b>")
|
||||
imgproc_ldsr_steps = gr.Slider(minimum=0, maximum=500, step=10, label="LDSR Sampling Steps",
|
||||
value=100,visible=LDSR is not None)
|
||||
imgproc_ldsr_pre_downSample = gr.Dropdown(label='LDSR Pre Downsample mode (Lower resolution before processing for speed)',
|
||||
choices=["None", '1/2', '1/4'],value="None",visible=LDSR is not None)
|
||||
imgproc_ldsr_post_downSample = gr.Dropdown(label='LDSR Post Downsample mode (aka SuperSampling)',
|
||||
choices=["None", "Original Size", '1/2', '1/4'],value="None",visible=LDSR is not None)
|
||||
with gr.Box(visible=False) as gobig_group:
|
||||
with gr.Row(elem_id="proc_prompt_row"):
|
||||
with gr.Column():
|
||||
gr.Markdown("<b>GoBig Settings</b>")
|
||||
imgproc_prompt = gr.Textbox(label="",
|
||||
elem_id='prompt_input',
|
||||
placeholder="A corgi wearing a top hat as an oil painting.",
|
||||
lines=1,
|
||||
max_lines=1,
|
||||
value=imgproc_defaults['prompt'],
|
||||
show_label=True,
|
||||
visible=RealESRGAN is not None)
|
||||
imgproc_sampling = gr.Dropdown(label='Sampling method (k_lms is default k-diffusion sampler)',
|
||||
choices=["DDIM", 'k_dpm_2_a', 'k_dpm_2', 'k_euler_a', 'k_euler',
|
||||
'k_heun', 'k_lms'],
|
||||
value=imgproc_defaults['sampler_name'],visible=RealESRGAN is not None)
|
||||
imgproc_steps = gr.Slider(minimum=1, maximum=250, step=1, label="Sampling Steps",
|
||||
value=imgproc_defaults['ddim_steps'],visible=RealESRGAN is not None)
|
||||
imgproc_cfg = gr.Slider(minimum=1.0, maximum=30.0, step=0.5,
|
||||
label='Classifier Free Guidance Scale (how strongly the image should follow the prompt)',
|
||||
value=imgproc_defaults['cfg_scale'],visible=RealESRGAN is not None)
|
||||
imgproc_denoising = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising Strength',
|
||||
value=imgproc_defaults['denoising_strength'],visible=RealESRGAN is not None)
|
||||
imgproc_height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height",
|
||||
value=imgproc_defaults["height"],visible=False) # not currently implemented
|
||||
imgproc_width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width",
|
||||
value=imgproc_defaults["width"],visible=False) # not currently implemented
|
||||
imgproc_seed = gr.Textbox(label="Seed (blank to randomize)", lines=1, max_lines=1,
|
||||
value=imgproc_defaults["seed"],visible=RealESRGAN is not None)
|
||||
imgproc_btn.click(
|
||||
imgproc,
|
||||
[imgproc_source, imgproc_folder,imgproc_prompt,imgproc_toggles,
|
||||
imgproc_upscale_toggles,imgproc_realesrgan_model_name,imgproc_sampling, imgproc_steps, imgproc_height,
|
||||
imgproc_width, imgproc_cfg, imgproc_denoising, imgproc_seed,imgproc_gfpgan_strength,imgproc_ldsr_steps,imgproc_ldsr_pre_downSample,imgproc_ldsr_post_downSample],
|
||||
[imgproc_output])
|
||||
gr.Markdown("<b>Latent Diffusion Super Sampling Settings</b>")
|
||||
imgproc_ldsr_steps = gr.Slider(minimum=0, maximum=500, step=10,
|
||||
label="LDSR Sampling Steps",
|
||||
value=100, visible=LDSR is not None)
|
||||
imgproc_ldsr_pre_downSample = gr.Dropdown(
|
||||
label='LDSR Pre Downsample mode (Lower resolution before processing for speed)',
|
||||
choices=["None", '1/2', '1/4'], value="None", visible=LDSR is not None)
|
||||
imgproc_ldsr_post_downSample = gr.Dropdown(
|
||||
label='LDSR Post Downsample mode (aka SuperSampling)',
|
||||
choices=["None", "Original Size", '1/2', '1/4'], value="None",
|
||||
visible=LDSR is not None)
|
||||
with gr.Box(visible=False) as gobig_group:
|
||||
with gr.Row(elem_id="proc_prompt_row"):
|
||||
with gr.Column():
|
||||
gr.Markdown("<b>GoBig Settings</b>")
|
||||
imgproc_prompt = gr.Textbox(label="",
|
||||
elem_id='prompt_input',
|
||||
placeholder="A corgi wearing a top hat as an oil painting.",
|
||||
lines=1,
|
||||
max_lines=1,
|
||||
value=imgproc_defaults['prompt'],
|
||||
show_label=True,
|
||||
visible=RealESRGAN is not None)
|
||||
imgproc_sampling = gr.Dropdown(
|
||||
label='Sampling method (k_lms is default k-diffusion sampler)',
|
||||
choices=["DDIM", 'k_dpm_2_a', 'k_dpm_2', 'k_euler_a', 'k_euler',
|
||||
'k_heun', 'k_lms'],
|
||||
value=imgproc_defaults['sampler_name'], visible=RealESRGAN is not None)
|
||||
imgproc_steps = gr.Slider(minimum=1, maximum=250, step=1,
|
||||
label="Sampling Steps",
|
||||
value=imgproc_defaults['ddim_steps'],
|
||||
visible=RealESRGAN is not None)
|
||||
imgproc_cfg = gr.Slider(minimum=1.0, maximum=30.0, step=0.5,
|
||||
label='Classifier Free Guidance Scale (how strongly the image should follow the prompt)',
|
||||
value=imgproc_defaults['cfg_scale'],
|
||||
visible=RealESRGAN is not None)
|
||||
imgproc_denoising = gr.Slider(minimum=0.0, maximum=1.0, step=0.01,
|
||||
label='Denoising Strength',
|
||||
value=imgproc_defaults['denoising_strength'],
|
||||
visible=RealESRGAN is not None)
|
||||
imgproc_height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height",
|
||||
value=imgproc_defaults["height"],
|
||||
visible=False) # not currently implemented
|
||||
imgproc_width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width",
|
||||
value=imgproc_defaults["width"],
|
||||
visible=False) # not currently implemented
|
||||
imgproc_seed = gr.Textbox(label="Seed (blank to randomize)", lines=1,
|
||||
max_lines=1,
|
||||
value=imgproc_defaults["seed"],
|
||||
visible=RealESRGAN is not None)
|
||||
imgproc_btn.click(
|
||||
imgproc,
|
||||
[imgproc_source, imgproc_folder, imgproc_prompt, imgproc_toggles,
|
||||
imgproc_upscale_toggles, imgproc_realesrgan_model_name, imgproc_sampling,
|
||||
imgproc_steps, imgproc_height,
|
||||
imgproc_width, imgproc_cfg, imgproc_denoising, imgproc_seed,
|
||||
imgproc_gfpgan_strength, imgproc_ldsr_steps, imgproc_ldsr_pre_downSample,
|
||||
imgproc_ldsr_post_downSample],
|
||||
[imgproc_output])
|
||||
|
||||
imgproc_source.change(
|
||||
uifn.get_png_nfo,
|
||||
[imgproc_source],
|
||||
[imgproc_pngnfo] )
|
||||
imgproc_source.change(
|
||||
uifn.get_png_nfo,
|
||||
[imgproc_source],
|
||||
[imgproc_pngnfo])
|
||||
|
||||
output_txt2img_to_imglab.click(
|
||||
fn=uifn.copy_img_params_to_lab,
|
||||
inputs = [output_txt2img_params],
|
||||
outputs = [imgproc_prompt,imgproc_seed,imgproc_steps,imgproc_cfg,imgproc_sampling],
|
||||
)
|
||||
|
||||
output_txt2img_to_imglab.click(
|
||||
fn=uifn.copy_img_to_lab,
|
||||
inputs = [output_txt2img_gallery],
|
||||
outputs = [imgproc_source, tabs],
|
||||
_js=call_JS("moveImageFromGallery",
|
||||
fromId="txt2img_gallery_output",
|
||||
toId="imglab_input")
|
||||
)
|
||||
if RealESRGAN is None:
|
||||
with gr.Row():
|
||||
with gr.Column():
|
||||
#seperator
|
||||
gr.HTML("""
|
||||
output_txt2img_to_imglab.click(
|
||||
fn=uifn.copy_img_params_to_lab,
|
||||
inputs=[output_txt2img_params],
|
||||
outputs=[imgproc_prompt, imgproc_seed, imgproc_steps, imgproc_cfg,
|
||||
imgproc_sampling],
|
||||
)
|
||||
|
||||
output_txt2img_to_imglab.click(
|
||||
fn=uifn.copy_img_to_lab,
|
||||
inputs=[output_txt2img_gallery],
|
||||
outputs=[imgproc_source, tabs],
|
||||
_js=call_JS("moveImageFromGallery",
|
||||
fromId="txt2img_gallery_output",
|
||||
toId="imglab_input")
|
||||
)
|
||||
if RealESRGAN is None:
|
||||
with gr.Row():
|
||||
with gr.Column():
|
||||
# seperator
|
||||
gr.HTML("""
|
||||
<div id="90" style="max-width: 100%; font-size: 14px; text-align: center;" class="output-markdown gr-prose border-solid border border-gray-200 rounded gr-panel">
|
||||
<p><b> Please download RealESRGAN to activate upscale features</b>, instructions are available at the <a href='https://github.com/hlky/stable-diffusion-webui'>Github</a></p>
|
||||
</div>
|
||||
""")
|
||||
imgproc_toggles.change(fn=uifn.toggle_options_gfpgan, inputs=[imgproc_toggles], outputs=[gfpgan_group])
|
||||
imgproc_toggles.change(fn=uifn.toggle_options_upscalers, inputs=[imgproc_toggles], outputs=[upscale_group])
|
||||
imgproc_toggles.change(fn=uifn.toggle_options_upscalers, inputs=[imgproc_toggles], outputs=[upscalerSettings_group])
|
||||
imgproc_upscale_toggles.change(fn=uifn.toggle_options_realesrgan, inputs=[imgproc_upscale_toggles], outputs=[realesrgan_group])
|
||||
imgproc_upscale_toggles.change(fn=uifn.toggle_options_ldsr, inputs=[imgproc_upscale_toggles], outputs=[ldsr_group])
|
||||
imgproc_upscale_toggles.change(fn=uifn.toggle_options_gobig, inputs=[imgproc_upscale_toggles], outputs=[gobig_group])
|
||||
imgproc_toggles.change(fn=uifn.toggle_options_upscalers, inputs=[imgproc_toggles],
|
||||
outputs=[upscalerSettings_group])
|
||||
imgproc_upscale_toggles.change(fn=uifn.toggle_options_realesrgan, inputs=[imgproc_upscale_toggles],
|
||||
outputs=[realesrgan_group])
|
||||
imgproc_upscale_toggles.change(fn=uifn.toggle_options_ldsr, inputs=[imgproc_upscale_toggles],
|
||||
outputs=[ldsr_group])
|
||||
imgproc_upscale_toggles.change(fn=uifn.toggle_options_gobig, inputs=[imgproc_upscale_toggles],
|
||||
outputs=[gobig_group])
|
||||
|
||||
"""
|
||||
if GFPGAN is not None:
|
||||
|
@@ -6,14 +6,17 @@ import base64
|
||||
import re
|
||||
|
||||
|
||||
def change_image_editor_mode(choice, cropped_image, resize_mode, width, height):
|
||||
def change_image_editor_mode(choice, cropped_image, masked_image, resize_mode, width, height):
|
||||
if choice == "Mask":
|
||||
return [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)]
|
||||
return [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)]
|
||||
update_image_result = update_image_mask(cropped_image, resize_mode, width, height)
|
||||
return [gr.update(visible=False), update_image_result, gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)]
|
||||
|
||||
update_image_result = update_image_mask(masked_image["image"] if masked_image is not None else None, resize_mode, width, height)
|
||||
return [update_image_result, gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)]
|
||||
|
||||
def update_image_mask(cropped_image, resize_mode, width, height):
|
||||
resized_cropped_image = resize_image(resize_mode, cropped_image, width, height) if cropped_image else None
|
||||
return gr.update(value=resized_cropped_image)
|
||||
return gr.update(value=resized_cropped_image, visible=True)
|
||||
|
||||
def toggle_options_gfpgan(selection):
|
||||
if 0 in selection:
|
||||
|
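The seven gr.update values returned above are hard to read out of context. As a reading aid, here is a hedged, self-contained restatement of the new change_image_editor_mode, with comments mapping each return slot onto the outputs wired up in frontend.py for img2img_image_editor_mode.change; resize_image is stubbed, so this is a sketch rather than the repository code.

import gradio as gr

def resize_image(resize_mode, image, width, height):
    # Stub for the real helper defined elsewhere in frontend/ui_functions.py.
    return image

def update_image_mask(cropped_image, resize_mode, width, height):
    resized = resize_image(resize_mode, cropped_image, width, height) if cropped_image else None
    return gr.update(value=resized, visible=True)

def change_image_editor_mode(choice, cropped_image, masked_image, resize_mode, width, height):
    # Return slots map positionally onto:
    # [img2img_image_editor, img2img_image_mask, img2img_btn_editor, img2img_btn_mask,
    #  img2img_painterro_btn, img2img_mask, img2img_mask_blur_strength]
    if choice == "Mask":
        # Hide the crop editor, show the sketch/mask editor (seeded with the crop image)
        # plus the mask-mode radio and the blur slider.
        return [gr.update(visible=False),
                update_image_mask(cropped_image, resize_mode, width, height),
                gr.update(visible=False), gr.update(visible=True),
                gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)]
    # "Crop": show the crop editor (seeded with the sketch editor's image), hide the mask controls.
    return [update_image_mask(masked_image["image"] if masked_image is not None else None,
                              resize_mode, width, height),
            gr.update(visible=False), gr.update(visible=True), gr.update(visible=False),
            gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)]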
@@ -1,5 +1,7 @@
|
||||
import argparse, os, sys, glob, re
|
||||
|
||||
import cv2
|
||||
|
||||
from frontend.frontend import draw_gradio_ui
|
||||
from frontend.job_manager import JobManager, JobInfo
|
||||
from frontend.ui_functions import resize_image
|
||||
@@ -1223,9 +1225,14 @@ class Flagging(gr.FlaggingCallback):
|
||||
print("Logged:", filenames[0])
|
||||
|
||||
|
||||
def img2img(prompt: str, image_editor_mode: str, init_info: any, init_info_mask: any, mask_mode: str, mask_blur_strength: int, ddim_steps: int, sampler_name: str,
|
||||
def img2img(prompt: str, image_editor_mode: str, mask_mode: str, mask_blur_strength: int, ddim_steps: int, sampler_name: str,
|
||||
toggles: List[int], realesrgan_model_name: str, n_iter: int, cfg_scale: float, denoising_strength: float,
|
||||
seed: int, height: int, width: int, resize_mode: int, fp = None, job_info: JobInfo = None):
|
||||
seed: int, height: int, width: int, resize_mode: int, init_info: any = None, init_info_mask: any = None, fp = None, job_info: JobInfo = None):
|
||||
# print([prompt, image_editor_mode, init_info, init_info_mask, mask_mode,
|
||||
# mask_blur_strength, ddim_steps, sampler_name, toggles,
|
||||
# realesrgan_model_name, n_iter, cfg_scale,
|
||||
# denoising_strength, seed, height, width, resize_mode,
|
||||
# fp])
|
||||
outpath = opt.outdir_img2img or opt.outdir or "outputs/img2img-samples"
|
||||
err = False
|
||||
seed = seed_to_int(seed)
|
||||
@@ -1274,13 +1281,15 @@ def img2img(prompt: str, image_editor_mode: str, init_info: any, init_info_mask:
|
||||
init_img = init_info_mask["image"]
|
||||
init_img = init_img.convert("RGB")
|
||||
init_img = resize_image(resize_mode, init_img, width, height)
|
||||
init_img = init_img.convert("RGB")
|
||||
init_mask = init_info_mask["mask"]
|
||||
init_mask = resize_image(resize_mode, init_mask, width, height)
|
||||
keep_mask = mask_mode == 0
|
||||
init_mask = init_mask.convert("RGB")
|
||||
init_mask = resize_image(resize_mode, init_mask, width, height)
|
||||
init_mask = init_mask.convert("RGB")
|
||||
keep_mask = mask_mode == 0
|
||||
init_mask = init_mask if keep_mask else ImageOps.invert(init_mask)
|
||||
else:
|
||||
init_img = init_info.convert("RGB")
|
||||
init_img = init_info
|
||||
init_mask = None
|
||||
keep_mask = False
|
||||
|
||||
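A side note on the mask handling above: mask_mode arrives as the index of the "Mask Mode" radio defined in frontend.py ("Keep masked area" = 0, "Regenerate only masked area" = 1), which is why the sketched mask is inverted in the second case. A minimal sketch of just that step, with illustrative names (the resize step from the diff is omitted here):

from PIL import Image, ImageOps

def prepare_mask(init_info_mask: dict, mask_mode: int) -> Image.Image:
    # init_info_mask is the dict produced by gr.Image(tool="sketch"): {"image": ..., "mask": ...}
    mask = init_info_mask["mask"].convert("RGB")
    keep_mask = mask_mode == 0  # 0: "Keep masked area", 1: "Regenerate only masked area"
    return mask if keep_mask else ImageOps.invert(mask)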
@@ -1290,14 +1299,14 @@ def img2img(prompt: str, image_editor_mode: str, init_info: any, init_info_mask:
|
||||
def init():
|
||||
image = init_img.convert("RGB")
|
||||
image = resize_image(resize_mode, image, width, height)
|
||||
#image = image.convert("RGB") #todo: mask mode -> ValueError: could not convert string to float:
|
||||
#image = image.convert("RGB")
|
||||
image = np.array(image).astype(np.float32) / 255.0
|
||||
image = image[None].transpose(0, 3, 1, 2)
|
||||
image = torch.from_numpy(image)
|
||||
|
||||
mask_channel = None
|
||||
if image_editor_mode == "Uncrop":
|
||||
alpha = init_img.convert("RGB")
|
||||
alpha = init_img.convert("RGBA")
|
||||
alpha = resize_image(resize_mode, alpha, width // 8, height // 8)
|
||||
mask_channel = alpha.split()[-1]
|
||||
mask_channel = mask_channel.filter(ImageFilter.GaussianBlur(4))
|
||||
@@ -1305,7 +1314,7 @@ def img2img(prompt: str, image_editor_mode: str, init_info: any, init_info_mask:
|
||||
mask_channel[mask_channel >= 255] = 255
|
||||
mask_channel[mask_channel < 255] = 0
|
||||
mask_channel = Image.fromarray(mask_channel).filter(ImageFilter.GaussianBlur(2))
|
||||
elif init_mask is not None:
|
||||
elif image_editor_mode == "Mask":
|
||||
alpha = init_mask.convert("RGBA")
|
||||
alpha = resize_image(resize_mode, alpha, width // 8, height // 8)
|
||||
mask_channel = alpha.split()[1]
|
||||
@@ -1324,7 +1333,7 @@ def img2img(prompt: str, image_editor_mode: str, init_info: any, init_info_mask:
|
||||
init_image = init_image.to(device)
|
||||
init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
|
||||
init_latent = (model if not opt.optimized else modelFS).get_first_stage_encoding((model if not opt.optimized else modelFS).encode_first_stage(init_image)) # move to latent space
|
||||
|
||||
|
||||
if opt.optimized:
|
||||
mem = torch.cuda.memory_allocated()/1e6
|
||||
modelFS.to("cpu")
|
||||
@@ -1382,7 +1391,17 @@ def img2img(prompt: str, image_editor_mode: str, init_info: any, init_info_mask:
|
||||
history = []
|
||||
initial_seed = None
|
||||
|
||||
do_color_correction = False
|
||||
try:
|
||||
from skimage import exposure
|
||||
do_color_correction = True
|
||||
except:
|
||||
print("Install scikit-image to perform color correction on loopback")
|
||||
|
||||
for i in range(n_iter):
|
||||
if do_color_correction and i == 0:
|
||||
correction_target = cv2.cvtColor(np.asarray(init_img.copy()), cv2.COLOR_RGB2LAB)
|
||||
|
||||
output_images, seed, info, stats = process_images(
|
||||
outpath=outpath,
|
||||
func_init=init,
|
||||
@@ -1424,6 +1443,17 @@ def img2img(prompt: str, image_editor_mode: str, init_info: any, init_info_mask:
|
||||
initial_seed = seed
|
||||
|
||||
init_img = output_images[0]
|
||||
|
||||
if do_color_correction and correction_target is not None:
|
||||
init_img = Image.fromarray(cv2.cvtColor(exposure.match_histograms(
|
||||
cv2.cvtColor(
|
||||
np.asarray(init_img),
|
||||
cv2.COLOR_RGB2LAB
|
||||
),
|
||||
correction_target,
|
||||
channel_axis=2
|
||||
), cv2.COLOR_LAB2RGB).astype("uint8"))
|
||||
|
||||
if not random_seed_loopback:
|
||||
seed = seed + 1
|
||||
else:
|
||||
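The loopback color correction added in the two hunks above can be read in isolation as follows. This is a hedged, self-contained sketch of the same idea (convert to LAB, histogram-match against a reference frame, convert back) rather than the exact webui.py code; in the diff, the reference is the LAB version of the very first init image (correction_target, captured when i == 0), and the corrected frame becomes init_img for the next loopback pass.

import cv2
import numpy as np
from PIL import Image
from skimage import exposure

def match_loopback_colors(current: Image.Image, reference: Image.Image) -> Image.Image:
    # Matching in LAB keeps lightness and color separate, which counters the gradual
    # color drift that repeated img2img loopback passes otherwise accumulate.
    reference_lab = cv2.cvtColor(np.asarray(reference), cv2.COLOR_RGB2LAB)
    current_lab = cv2.cvtColor(np.asarray(current), cv2.COLOR_RGB2LAB)
    matched = exposure.match_histograms(current_lab, reference_lab, channel_axis=2)
    return Image.fromarray(cv2.cvtColor(matched.astype("uint8"), cv2.COLOR_LAB2RGB))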
@@ -2012,9 +2042,9 @@ imgproc_mode_toggles = [
|
||||
'Upscale'
|
||||
]
|
||||
|
||||
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
|
||||
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
|
||||
|
||||
#sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
|
||||
#sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
|
||||
sample_img2img = None
|
||||
# make sure these indicies line up at the top of img2img()
|
||||
img2img_toggles = [
|
||||
'Create prompt matrix (separate multiple prompts using |, and get all combinations of them)',
|
||||
@@ -2078,22 +2108,13 @@ def update_image_mask(cropped_image, resize_mode, width, height):
|
||||
resized_cropped_image = resize_image(resize_mode, cropped_image, width, height) if cropped_image else None
|
||||
return gr.update(value=resized_cropped_image)
|
||||
|
||||
def copy_img_to_input(img):
|
||||
try:
|
||||
image_data = re.sub('^data:image/.+;base64,', '', img)
|
||||
processed_image = Image.open(BytesIO(base64.b64decode(image_data)))
|
||||
tab_update = gr.update(selected='img2img_tab')
|
||||
img_update = gr.update(value=processed_image)
|
||||
return {img2img_image_mask: processed_image, img2img_image_editor: img_update, tabs: tab_update}
|
||||
except IndexError:
|
||||
return [None, None]
|
||||
|
||||
|
||||
def copy_img_to_upscale_esrgan(img):
|
||||
update = gr.update(selected='realesrgan_tab')
|
||||
image_data = re.sub('^data:image/.+;base64,', '', img)
|
||||
processed_image = Image.open(BytesIO(base64.b64decode(image_data)))
|
||||
return {realesrgan_source: processed_image, tabs: update}
|
||||
return {'realesrgan_source': processed_image, 'tabs': update}
|
||||
|
||||
|
||||
help_text = """