diff --git a/webui.py b/webui.py
index 37caa4a..675fec1 100644
--- a/webui.py
+++ b/webui.py
@@ -390,7 +390,8 @@ def check_prompt_length(prompt, comments):
     comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")
 
 
-def process_images(outpath, func_init, func_sample, prompt, seed, sampler_name, skip_grid, skip_save, batch_size, n_iter, steps, cfg_scale, width, height, prompt_matrix, use_GFPGAN, fp, do_not_save_grid=False, normalize_prompt_weights=True, init_img=None, init_mask=None, keep_mask=True):
+def process_images(outpath, func_init, func_sample, prompt, seed, sampler_name, skip_grid, skip_save, batch_size, n_iter, steps, cfg_scale, width, height, prompt_matrix,
+                   use_GFPGAN, fp, do_not_save_grid=False, normalize_prompt_weights=True, init_img=None, init_mask=None, keep_mask=True):
     """this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
     assert prompt is not None
     torch_gc()
@@ -493,9 +494,8 @@ def process_images(outpath, func_init, func_sample, prompt, seed, sampler_name,
                 x_sample = x_sample.astype(np.uint8)
 
                 if use_GFPGAN and GFPGAN is not None:
-                    cropped_faces, restored_faces, restored_img = GFPGAN.enhance(x_sample, has_aligned=False, only_center_face=False, paste_back=True)
-                    x_sample = restored_img
-
+                    cropped_faces, restored_faces, restored_img = GFPGAN.enhance(x_sample[:,:,::-1], has_aligned=False, only_center_face=False, paste_back=True)
+                    x_sample = restored_img[:,:,::-1]
                 image = Image.fromarray(x_sample)
 
                 if init_mask:
@@ -971,4 +971,4 @@ demo = gr.TabbedInterface(
     theme="default",
 )
 demo.queue(concurrency_count=1)
-demo.launch(show_error=True, server_name='0.0.0.0')
\ No newline at end of file
+demo.launch(show_error=True, server_name='0.0.0.0')
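
Note on the GFPGAN hunk: GFPGAN's enhance() (built on facexlib/OpenCV) operates on
images in BGR channel order, while this pipeline carries RGB numpy arrays, so the
patch reverses the channel axis on the way in and again on the way out. A minimal
sketch of that round trip, assuming an already-initialised gfpgan.GFPGANer instance;
the helper name and variable names here are illustrative, not from the patch:

    import numpy as np

    def restore_faces_rgb(gfpgan_model, rgb_image: np.ndarray) -> np.ndarray:
        """Run GFPGAN face restoration on an RGB uint8 array, returning RGB."""
        bgr_image = rgb_image[:, :, ::-1]  # RGB -> BGR for GFPGAN/OpenCV
        cropped_faces, restored_faces, restored_bgr = gfpgan_model.enhance(
            bgr_image, has_aligned=False, only_center_face=False, paste_back=True)
        return restored_bgr[:, :, ::-1]    # BGR -> RGB for PIL/Image.fromarray

Without the flip, red and blue channels would be swapped in the restored faces,
which is the bug this hunk appears to fix.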