Mirror of https://github.com/openvinotoolkit/stable-diffusion-webui.git (synced 2024-12-15 07:03:06 +03:00)
Add batch processing to Extras tab

Commit 3763837003, parent deea9f4d70
@@ -13,66 +13,85 @@ import piexif.helper
 cached_images = {}
 
 
-def run_extras(image, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility):
+def run_extras(image, image_folder, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility):
     devices.torch_gc()
 
-    existing_pnginfo = image.info or {}
+    imageArr = []
 
-    image = image.convert("RGB")
-    info = ""
+    if image_folder != None:
+        if image != None:
+            print("Batch detected and single image detected, please only use one of the two. Aborting.")
+            return None
+        #convert file to pillow image
+        for img in image_folder:
+            image = Image.fromarray(np.array(Image.open(img)))
+            imageArr.append(image)
+
+    elif image != None:
+        if image_folder != None:
+            print("Batch detected and single image detected, please only use one of the two. Aborting.")
+            return None
+        else:
+            imageArr.append(image)
 
     outpath = opts.outdir_samples or opts.outdir_extras_samples
 
-    if gfpgan_visibility > 0:
-        restored_img = modules.gfpgan_model.gfpgan_fix_faces(np.array(image, dtype=np.uint8))
-        res = Image.fromarray(restored_img)
-
-        if gfpgan_visibility < 1.0:
-            res = Image.blend(image, res, gfpgan_visibility)
-
-        info += f"GFPGAN visibility:{round(gfpgan_visibility, 2)}\n"
-        image = res
-
-    if codeformer_visibility > 0:
-        restored_img = modules.codeformer_model.codeformer.restore(np.array(image, dtype=np.uint8), w=codeformer_weight)
-        res = Image.fromarray(restored_img)
-
-        if codeformer_visibility < 1.0:
-            res = Image.blend(image, res, codeformer_visibility)
-
-        info += f"CodeFormer w: {round(codeformer_weight, 2)}, CodeFormer visibility:{round(codeformer_visibility)}\n"
-        image = res
-
-    if upscaling_resize != 1.0:
-        def upscale(image, scaler_index, resize):
-            small = image.crop((image.width // 2, image.height // 2, image.width // 2 + 10, image.height // 2 + 10))
-            pixels = tuple(np.array(small).flatten().tolist())
-            key = (resize, scaler_index, image.width, image.height, gfpgan_visibility, codeformer_visibility, codeformer_weight) + pixels
-
-            c = cached_images.get(key)
-            if c is None:
-                upscaler = shared.sd_upscalers[scaler_index]
-                c = upscaler.upscale(image, image.width * resize, image.height * resize)
-                cached_images[key] = c
-
-            return c
-
-        info += f"Upscale: {round(upscaling_resize, 3)}, model:{shared.sd_upscalers[extras_upscaler_1].name}\n"
-        res = upscale(image, extras_upscaler_1, upscaling_resize)
-
-        if extras_upscaler_2 != 0 and extras_upscaler_2_visibility > 0:
-            res2 = upscale(image, extras_upscaler_2, upscaling_resize)
-            info += f"Upscale: {round(upscaling_resize, 3)}, visibility: {round(extras_upscaler_2_visibility, 3)}, model:{shared.sd_upscalers[extras_upscaler_2].name}\n"
-            res = Image.blend(res, res2, extras_upscaler_2_visibility)
-
-        image = res
-
-    while len(cached_images) > 2:
-        del cached_images[next(iter(cached_images.keys()))]
-
-    images.save_image(image, path=outpath, basename="", seed=None, prompt=None, extension=opts.samples_format, info=info, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo)
-
-    return image, plaintext_to_html(info), ''
+    for image in imageArr:
+        existing_pnginfo = image.info or {}
+
+        image = image.convert("RGB")
+        info = ""
+
+        if gfpgan_visibility > 0:
+            restored_img = modules.gfpgan_model.gfpgan_fix_faces(np.array(image, dtype=np.uint8))
+            res = Image.fromarray(restored_img)
+
+            if gfpgan_visibility < 1.0:
+                res = Image.blend(image, res, gfpgan_visibility)
+
+            info += f"GFPGAN visibility:{round(gfpgan_visibility, 2)}\n"
+            image = res
+
+        if codeformer_visibility > 0:
+            restored_img = modules.codeformer_model.codeformer.restore(np.array(image, dtype=np.uint8), w=codeformer_weight)
+            res = Image.fromarray(restored_img)
+
+            if codeformer_visibility < 1.0:
+                res = Image.blend(image, res, codeformer_visibility)
+
+            info += f"CodeFormer w: {round(codeformer_weight, 2)}, CodeFormer visibility:{round(codeformer_visibility)}\n"
+            image = res
+
+        if upscaling_resize != 1.0:
+            def upscale(image, scaler_index, resize):
+                small = image.crop((image.width // 2, image.height // 2, image.width // 2 + 10, image.height // 2 + 10))
+                pixels = tuple(np.array(small).flatten().tolist())
+                key = (resize, scaler_index, image.width, image.height, gfpgan_visibility, codeformer_visibility, codeformer_weight) + pixels
+
+                c = cached_images.get(key)
+                if c is None:
+                    upscaler = shared.sd_upscalers[scaler_index]
+                    c = upscaler.upscale(image, image.width * resize, image.height * resize)
+                    cached_images[key] = c
+
+                return c
+
+            info += f"Upscale: {round(upscaling_resize, 3)}, model:{shared.sd_upscalers[extras_upscaler_1].name}\n"
+            res = upscale(image, extras_upscaler_1, upscaling_resize)
+
+            if extras_upscaler_2 != 0 and extras_upscaler_2_visibility > 0:
+                res2 = upscale(image, extras_upscaler_2, upscaling_resize)
+                info += f"Upscale: {round(upscaling_resize, 3)}, visibility: {round(extras_upscaler_2_visibility, 3)}, model:{shared.sd_upscalers[extras_upscaler_2].name}\n"
+                res = Image.blend(res, res2, extras_upscaler_2_visibility)
+
+            image = res
+
+        while len(cached_images) > 2:
+            del cached_images[next(iter(cached_images.keys()))]
+
+        images.save_image(image, path=outpath, basename="", seed=None, prompt=None, extension=opts.samples_format, info=info, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo)
+
+    return imageArr, plaintext_to_html(info), ''
 
 
 def run_pnginfo(image):
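
For context, a minimal, self-contained sketch of the input-normalization pattern the patched run_extras() follows: accept either a single PIL image or a batch of uploaded files, reduce both cases to one list of PIL images, then run the same per-image pipeline over that list. The names normalize_inputs, process_one and run_batch are illustrative only and are not part of the webui code.

import numpy as np
from PIL import Image


def normalize_inputs(image, image_folder):
    """Return a list of PIL images, or None if both inputs were supplied."""
    images = []

    if image_folder is not None:
        if image is not None:
            print("Batch detected and single image detected, please only use one of the two. Aborting.")
            return None
        # re-materialize each uploaded file as a PIL image, mirroring
        # Image.fromarray(np.array(Image.open(img))) in the commit
        for img in image_folder:
            images.append(Image.fromarray(np.array(Image.open(img))))
    elif image is not None:
        images.append(image)

    return images


def process_one(img):
    # stand-in for the GFPGAN / CodeFormer / upscaling steps
    return img.convert("RGB")


def run_batch(image, image_folder):
    images = normalize_inputs(image, image_folder)
    if images is None:
        return None
    return [process_one(img) for img in images]


# single-image call still yields a one-element result list
print(len(run_batch(Image.new("RGB", (8, 8)), None)))  # -> 1
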
@@ -644,8 +644,12 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
     with gr.Blocks(analytics_enabled=False) as extras_interface:
         with gr.Row().style(equal_height=False):
             with gr.Column(variant='panel'):
-                with gr.Group():
-                    image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
+                with gr.Tabs():
+                    with gr.TabItem('Single Image'):
+                        image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
+
+                    with gr.TabItem('Batch Process'):
+                        image_batch = gr.File(label="Batch Process", file_count="multiple", source="upload", interactive=True, type="file")
 
                 upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2)
 
@@ -666,7 +670,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                 submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
 
             with gr.Column(variant='panel'):
-                result_image = gr.Image(label="Result")
+                result_images = gr.Gallery(label="Result")
                 html_info_x = gr.HTML()
                 html_info = gr.HTML()
 
@@ -674,6 +678,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
             fn=run_extras,
             inputs=[
                 image,
+                image_batch,
                 gfpgan_visibility,
                 codeformer_visibility,
                 codeformer_weight,
@@ -683,7 +688,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                 extras_upscaler_2_visibility,
             ],
             outputs=[
-                result_image,
+                result_images,
                 html_info_x,
                 html_info,
             ]
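
And a minimal sketch of the UI wiring this commit introduces, assuming the Gradio 3.x API the webui used at the time (gr.Tabs / gr.TabItem, gr.File(type="file"), gr.Gallery); it is a standalone illustration, not the webui's create_ui() itself. Whichever tab the user leaves empty reaches the callback as None, which is why run_extras now receives both image and image_batch and why the result component becomes a Gallery that can show a whole batch.

import gradio as gr
from PIL import Image


def run_extras_demo(image, image_batch):
    # normalize to a list, as in the patched run_extras()
    if image_batch is not None:
        return [Image.open(f) for f in image_batch]  # each item is a file-like upload
    if image is not None:
        return [image]
    return []


with gr.Blocks(analytics_enabled=False) as demo:
    with gr.Row():
        with gr.Column(variant='panel'):
            with gr.Tabs():
                with gr.TabItem('Single Image'):
                    image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
                with gr.TabItem('Batch Process'):
                    image_batch = gr.File(label="Batch Process", file_count="multiple", source="upload", interactive=True, type="file")
            submit = gr.Button('Generate', variant='primary')
        with gr.Column(variant='panel'):
            result_images = gr.Gallery(label="Result")

    # both inputs are always passed; the unused tab's component arrives as None
    submit.click(fn=run_extras_demo, inputs=[image, image_batch], outputs=[result_images])

if __name__ == "__main__":
    demo.launch()
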