Mirror of https://github.com/Sygil-Dev/sygil-webui.git (synced 2024-12-14 22:13:41 +03:00)
Fixed LDSR not working on txt2img and img2img.
- Removed the checkbox for disabling the preview image. Users who run into performance issues should instead increase the number of steps between preview updates; past a certain interval the preview no longer has a measurable effect on performance.
This commit is contained in:
parent
38ff4a138c
commit
7354c901d2
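For reference, the preview-settings pattern this commit standardizes on looks roughly like the sketch below: the preview stays enabled and only its refresh interval (in steps) is exposed. This is a minimal extract assuming the `st.session_state["defaults"]` config object used throughout the diff, not a verbatim copy of the changed files.

```python
import streamlit as st

# Minimal sketch of the preview settings after this commit: the preview is
# always enabled and only its refresh interval is configurable.
# Assumes a st.session_state["defaults"] config object as in the diff.
st.session_state["defaults"].general.update_preview = True
st.session_state["defaults"].general.update_preview_frequency = int(
    st.text_input(
        "Update Preview Frequency",
        value=st.session_state["defaults"].general.update_preview_frequency,
        help="Number of steps between preview refreshes; raise it if the preview "
             "slows generation down. Default: 10",
    )
)
```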
@@ -150,15 +150,15 @@ def layout():
(Not properly implemented and currently not working, check this \
link 'https://github.com/huggingface/diffusers/pull/537' for more information on it ). Default: False")

st.session_state["defaults"].general.update_preview = st.checkbox("Update Preview Image", value=st.session_state['defaults'].general.update_preview,
help="Enables the preview image to be updated and shown to the user on the UI during the generation.\
If checked, once you save the settings an option to specify the frequency at which the image is updated\
in steps will be shown, this is helpful to reduce the negative effect this option has on performance. \
Default: True")
if st.session_state["defaults"].general.update_preview:
st.session_state["defaults"].general.update_preview_frequency = int(st.text_input("Update Preview Frequency", value=st.session_state['defaults'].general.update_preview_frequency,
help="Specify the frequency at which the image is updated in steps, this is helpful to reduce the \
negative effect updating the preview image has on performance. Default: 10"))
#st.session_state["defaults"].general.update_preview = st.checkbox("Update Preview Image", value=st.session_state['defaults'].general.update_preview,
#help="Enables the preview image to be updated and shown to the user on the UI during the generation.\
#If checked, once you save the settings an option to specify the frequency at which the image is updated\
#in steps will be shown, this is helpful to reduce the negative effect this option has on performance. \
#Default: True")
st.session_state["defaults"].general.update_preview = True
st.session_state["defaults"].general.update_preview_frequency = int(st.text_input("Update Preview Frequency", value=st.session_state['defaults'].general.update_preview_frequency,
help="Specify the frequency at which the image is updated in steps, this is helpful to reduce the \
negative effect updating the preview image has on performance. Default: 10"))

with col3:
st.title("Others")

@@ -222,7 +222,7 @@ def layout():
at https://huggingface.co/settings/tokens. Default: None")

with txt2img_tab:
col1, col2, col3, col4, col5 = st.columns(5, gap='large')
col1, col2, col3, col4, col5 = st.columns(5, gap='medium')

with col1:
st.title("Slider Parameters")

@@ -277,44 +277,19 @@ def layout():
st.session_state["defaults"].txt2img.sampling_steps.step = int(st.text_input("Sampling Slider Steps",
value=st.session_state['defaults'].txt2img.sampling_steps.step,
help="Set the default value for the number of steps on the sampling steps slider. Default is: 10"))

with col3:
# Batch Count
st.session_state["defaults"].txt2img.batch_count.value = int(st.text_input("Default Batch Count",
value=st.session_state['defaults'].txt2img.batch_count.value,
help="Set the default batch count to use. Default is: 1"))

st.session_state["defaults"].txt2img.batch_count.min_value = int(st.text_input("Minimum Batch Count",
value=st.session_state['defaults'].txt2img.batch_count.min_value,
help="Set the default minimum value for the batch count slider. Default is: 1"))

st.session_state["defaults"].txt2img.batch_count.max_value = int(st.text_input("Maximum Batch Count",
value=st.session_state['defaults'].txt2img.batch_count.max_value,
help="Set the default maximum value for the batch count slider. Default is: 100"))

st.session_state["defaults"].txt2img.batch_count.step = int(st.text_input("Batch Count Slider Steps",
value=st.session_state['defaults'].txt2img.batch_count.step,
help="Set the default value for the number of steps on the batch count slider. Default is: 10"))

# Batch Size
st.session_state["defaults"].txt2img.batch_size.value = int(st.text_input("Default Batch Size",
value=st.session_state['defaults'].txt2img.batch_size.value,
help="Set the default batch size to use. Default is: 1"))

st.session_state["defaults"].txt2img.batch_size.min_value = int(st.text_input("Minimum Batch Size",
value=st.session_state['defaults'].txt2img.batch_size.min_value,
help="Set the default minimum value for the batch size slider. Default is: 1"))

st.session_state["defaults"].txt2img.batch_size.max_value = int(st.text_input("Maximum Batch Size",
value=st.session_state['defaults'].txt2img.batch_size.max_value,
help="Set the default maximum value for the batch size slider. Default is: 5"))

st.session_state["defaults"].txt2img.batch_size.step = int(st.text_input("Batch Size Slider Steps",
value=st.session_state['defaults'].txt2img.batch_size.step,
help="Set the default value for the number of steps on the batch size slider. Default is: 1"))

with col4:
with col3:
st.title("General Parameters")

# Batch Count
st.session_state["batch_count"] = st.text_input("Batch count.", value=st.session_state['defaults'].txt2img.batch_count.value,
help="How many iterations or batches of images to generate in total.")

st.session_state["batch_size"] = st.text_input("Batch size", value=st.session_state.defaults.txt2img.batch_size.value,
help="How many images are at once in a batch.\
It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it \
takes to finish generation as more images are generated at once.\
Default: 1")

default_sampler_list = ["k_lms", "k_euler", "k_euler_a", "k_dpm_2", "k_dpm_2_a", "k_heun", "PLMS", "DDIM"]
st.session_state["defaults"].txt2img.default_sampler = st.selectbox("Default Sampler",

@@ -322,6 +297,8 @@ def layout():
help="Defaut sampler to use for txt2img. Default: k_euler")

st.session_state['defaults'].txt2img.seed = st.text_input("Default Seed", value=st.session_state['defaults'].txt2img.seed, help="Default seed.")

with col4:

st.session_state["defaults"].txt2img.separate_prompts = st.checkbox("Separate Prompts",
value=st.session_state['defaults'].txt2img.separate_prompts, help="Separate Prompts. Default: False")

@@ -351,9 +328,7 @@ def layout():
st.session_state["defaults"].txt2img.use_RealESRGAN = st.checkbox("Use RealESRGAN", value=st.session_state['defaults'].txt2img.use_RealESRGAN,
help="Choose to use RealESRGAN. Default: False")

st.session_state["defaults"].txt2img.update_preview = st.checkbox("Update Preview Image", value=st.session_state['defaults'].txt2img.update_preview,
help="Choose to update the preview image during generation. Default: True")

st.session_state["defaults"].txt2img.update_preview = True
st.session_state["defaults"].txt2img.update_preview_frequency = int(st.text_input("Preview Image Update Frequency",
value=st.session_state['defaults'].txt2img.update_preview_frequency,
help="Set the default value for the frrquency of the preview image updates. Default is: 10"))

@@ -471,38 +446,14 @@ def layout():
help="Set the default value for the number of steps on the sampling steps slider. Default is: 10"))

# Batch Count
st.session_state["defaults"].img2img.batch_count.value = int(st.text_input("Default Img2Img Batch Count",
value=st.session_state['defaults'].img2img.batch_count.value,
help="Set the default batch count to use. Default is: 1"))

st.session_state["defaults"].img2img.batch_count.min_value = int(st.text_input("Minimum Img2Img Batch Count",
value=st.session_state['defaults'].img2img.batch_count.min_value,
help="Set the default minimum value for the batch count slider. Default is: 1"))

st.session_state["defaults"].img2img.batch_count.max_value = int(st.text_input("Maximum Img2Img Batch Count",
value=st.session_state['defaults'].img2img.batch_count.max_value,
help="Set the default maximum value for the batch count slider. Default is: 100"))

st.session_state["defaults"].img2img.batch_count.step = int(st.text_input("Img2Img Batch Count Slider Steps",
value=st.session_state['defaults'].img2img.batch_count.step,
help="Set the default value for the number of steps on the batch count slider. Default is: 10"))

# Batch Size
st.session_state["defaults"].img2img.batch_size.value = int(st.text_input("Default Img2Img Batch Size",
value=st.session_state['defaults'].img2img.batch_size.value,
help="Set the default batch size to use. Default is: 1"))

st.session_state["defaults"].img2img.batch_size.min_value = int(st.text_input("Minimum Img2Img Batch Size",
value=st.session_state['defaults'].img2img.batch_size.min_value,
help="Set the default minimum value for the batch size slider. Default is: 1"))

st.session_state["defaults"].img2img.batch_size.max_value = int(st.text_input("Maximum Img2Img Batch Size",
value=st.session_state['defaults'].img2img.batch_size.max_value,
help="Set the default maximum value for the batch size slider. Default is: 5"))

st.session_state["defaults"].img2img.batch_size.step = int(st.text_input("Img2Img Batch Size Slider Steps",
value=st.session_state['defaults'].img2img.batch_size.step,
help="Set the default value for the number of steps on the batch size slider. Default is: 1"))
st.session_state["batch_count"] = st.text_input("Batch count.", value=st.session_state['defaults'].txt2img.batch_count.value,
help="How many iterations or batches of images to generate in total.")

st.session_state["batch_size"] = st.text_input("Batch size", value=st.session_state.defaults.txt2img.batch_size.value,
help="How many images are at once in a batch.\
It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it \
takes to finish generation as more images are generated at once.\
Default: 1")
with col4:
# Inference Steps
st.session_state["defaults"].img2img.num_inference_steps.value = int(st.text_input("Default Inference Steps",

@@ -580,9 +531,7 @@ def layout():
st.session_state["defaults"].img2img.use_RealESRGAN = st.checkbox("Img2Img Use RealESRGAN", value=st.session_state['defaults'].img2img.use_RealESRGAN,
help="Choose to use RealESRGAN. Default: False")

st.session_state["defaults"].img2img.update_preview = st.checkbox("Update Img2Img Preview Image", value=st.session_state['defaults'].img2img.update_preview,
help="Choose to update the preview image during generation. Default: True")

st.session_state["defaults"].img2img.update_preview = True
st.session_state["defaults"].img2img.update_preview_frequency = int(st.text_input("Img2Img Preview Image Update Frequency",
value=st.session_state['defaults'].img2img.update_preview_frequency,
help="Set the default value for the frrquency of the preview image updates. Default is: 10"))

@@ -686,38 +635,14 @@ def layout():
help="Set the default value for the number of steps on the sampling steps slider. Default is: 10"))

# Batch Count
st.session_state["defaults"].txt2vid.batch_count.value = int(st.text_input("Default txt2vid Batch Count",
value=st.session_state['defaults'].txt2vid.batch_count.value,
help="Set the default batch count to use. Default is: 1"))

st.session_state["defaults"].txt2vid.batch_count.min_value = int(st.text_input("Minimum txt2vid Batch Count",
value=st.session_state['defaults'].img2img.batch_count.min_value,
help="Set the default minimum value for the batch count slider. Default is: 1"))

st.session_state["defaults"].img2img.batch_count.max_value = int(st.text_input("Maximum txt2vid Batch Count",
value=st.session_state['defaults'].txt2vid.batch_count.max_value,
help="Set the default maximum value for the batch count slider. Default is: 100"))

st.session_state["defaults"].txt2vid.batch_count.step = int(st.text_input("txt2vid Batch Count Slider Steps",
value=st.session_state['defaults'].txt2vid.batch_count.step,
help="Set the default value for the number of steps on the batch count slider. Default is: 10"))

# Batch Size
st.session_state["defaults"].txt2vid.batch_size.value = int(st.text_input("Default txt2vid Batch Size",
value=st.session_state['defaults'].txt2vid.batch_size.value,
help="Set the default batch size to use. Default is: 1"))

st.session_state["defaults"].txt2vid.batch_size.min_value = int(st.text_input("Minimum txt2vid Batch Size",
value=st.session_state['defaults'].txt2vid.batch_size.min_value,
help="Set the default minimum value for the batch size slider. Default is: 1"))

st.session_state["defaults"].txt2vid.batch_size.max_value = int(st.text_input("Maximum txt2vid Batch Size",
value=st.session_state['defaults'].txt2vid.batch_size.max_value,
help="Set the default maximum value for the batch size slider. Default is: 5"))

st.session_state["defaults"].txt2vid.batch_size.step = int(st.text_input("txt2vid Batch Size Slider Steps",
value=st.session_state['defaults'].txt2vid.batch_size.step,
help="Set the default value for the number of steps on the batch size slider. Default is: 1"))
st.session_state["batch_count"] = st.text_input("Batch count.", value=st.session_state['defaults'].txt2img.batch_count.value,
help="How many iterations or batches of images to generate in total.")

st.session_state["batch_size"] = st.text_input("Batch size", value=st.session_state.defaults.txt2img.batch_size.value,
help="How many images are at once in a batch.\
It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it \
takes to finish generation as more images are generated at once.\
Default: 1")

# Inference Steps
st.session_state["defaults"].txt2vid.num_inference_steps.value = int(st.text_input("Default Txt2Vid Inference Steps",

@@ -790,9 +715,7 @@ def layout():
st.session_state["defaults"].txt2vid.use_RealESRGAN = st.checkbox("txt2vid Use RealESRGAN", value=st.session_state['defaults'].txt2vid.use_RealESRGAN,
help="Choose to use RealESRGAN. Default: False")

st.session_state["defaults"].txt2vid.update_preview = st.checkbox("Update txt2vid Preview Image", value=st.session_state['defaults'].txt2vid.update_preview,
help="Choose to update the preview image during generation. Default: True")

st.session_state["defaults"].txt2vid.update_preview = True
st.session_state["defaults"].txt2vid.update_preview_frequency = int(st.text_input("txt2vid Preview Image Update Frequency",
value=st.session_state['defaults'].txt2vid.update_preview_frequency,
help="Set the default value for the frrquency of the preview image updates. Default is: 10"))

@@ -437,44 +437,40 @@ def layout():
step=st.session_state['defaults'].img2img.find_noise_steps.step)

with st.expander("Batch Options"):
batch_count = st.slider("Batch count.", min_value=st.session_state['defaults'].img2img.batch_count.min_value, max_value=st.session_state['defaults'].img2img.batch_count.max_value,
value=st.session_state['defaults'].img2img.batch_count.value, step=st.session_state['defaults'].img2img.batch_count.step,
help="How many iterations or batches of images to generate in total.")

batch_size = st.slider("Batch size", min_value=st.session_state['defaults'].img2img.batch_size.min_value, max_value=st.session_state['defaults'].img2img.batch_size.max_value,
value=st.session_state['defaults'].img2img.batch_size.value, step=st.session_state['defaults'].img2img.batch_size.step,
help="How many images are at once in a batch. It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it takes to finish \
generation as more images are generated at once.Default: 1")
st.session_state["batch_count"] = int(st.text_input("Batch count.", value=st.session_state['defaults'].txt2img.batch_count.value,
help="How many iterations or batches of images to generate in total."))

st.session_state["batch_size"] = int(st.text_input("Batch size", value=st.session_state.defaults.txt2img.batch_size.value,
help="How many images are at once in a batch.\
It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it takes to finish generation as more images are generated at once.\
Default: 1"))

with st.expander("Preview Settings"):
st.session_state["update_preview"] = st.checkbox("Update Image Preview", value=st.session_state['defaults'].img2img.update_preview,
help="If enabled the image preview will be updated during the generation instead of at the end. \
You can use the Update Preview \Frequency option bellow to customize how frequent it's updated. \
By default this is enabled and the frequency is set to 1 step.")

st.session_state["update_preview"] = st.session_state["defaults"].general.update_preview
st.session_state["update_preview_frequency"] = st.text_input("Update Image Preview Frequency", value=st.session_state['defaults'].img2img.update_preview_frequency,
help="Frequency in steps at which the the preview image is updated. By default the frequency \
is set to 1 step.")
#
with st.expander("Advanced"):
separate_prompts = st.checkbox("Create Prompt Matrix.", value=st.session_state['defaults'].img2img.separate_prompts,
help="Separate multiple prompts using the `|` character, and get all combinations of them.")
normalize_prompt_weights = st.checkbox("Normalize Prompt Weights.", value=st.session_state['defaults'].img2img.normalize_prompt_weights,
help="Ensure the sum of all weights add up to 1.0")
loopback = st.checkbox("Loopback.", value=st.session_state['defaults'].img2img.loopback, help="Use images from previous batch when creating next batch.")
random_seed_loopback = st.checkbox("Random loopback seed.", value=st.session_state['defaults'].img2img.random_seed_loopback, help="Random loopback seed")
img2img_mask_restore = st.checkbox("Only modify regenerated parts of image",
value=st.session_state['defaults'].img2img.mask_restore,
help="Enable to restore the unmasked parts of the image with the input, may not blend as well but preserves detail")
save_individual_images = st.checkbox("Save individual images.", value=st.session_state['defaults'].img2img.save_individual_images,
help="Save each image generated before any filter or enhancement is applied.")
save_grid = st.checkbox("Save grid",value=st.session_state['defaults'].img2img.save_grid, help="Save a grid with all the images generated into a single image.")
group_by_prompt = st.checkbox("Group results by prompt", value=st.session_state['defaults'].img2img.group_by_prompt,
help="Saves all the images with the same prompt into the same folder. \
When using a prompt matrix each prompt combination will have its own folder.")
write_info_files = st.checkbox("Write Info file", value=st.session_state['defaults'].img2img.write_info_files,
help="Save a file next to the image with informartion about the generation.")
save_as_jpg = st.checkbox("Save samples as jpg", value=st.session_state['defaults'].img2img.save_as_jpg, help="Saves the images as jpg instead of png.")
with st.expander("Output Settings"):
separate_prompts = st.checkbox("Create Prompt Matrix.", value=st.session_state['defaults'].img2img.separate_prompts,
help="Separate multiple prompts using the `|` character, and get all combinations of them.")
normalize_prompt_weights = st.checkbox("Normalize Prompt Weights.", value=st.session_state['defaults'].img2img.normalize_prompt_weights,
help="Ensure the sum of all weights add up to 1.0")
loopback = st.checkbox("Loopback.", value=st.session_state['defaults'].img2img.loopback, help="Use images from previous batch when creating next batch.")
random_seed_loopback = st.checkbox("Random loopback seed.", value=st.session_state['defaults'].img2img.random_seed_loopback, help="Random loopback seed")
img2img_mask_restore = st.checkbox("Only modify regenerated parts of image",
value=st.session_state['defaults'].img2img.mask_restore,
help="Enable to restore the unmasked parts of the image with the input, may not blend as well but preserves detail")
save_individual_images = st.checkbox("Save individual images.", value=st.session_state['defaults'].img2img.save_individual_images,
help="Save each image generated before any filter or enhancement is applied.")
save_grid = st.checkbox("Save grid",value=st.session_state['defaults'].img2img.save_grid, help="Save a grid with all the images generated into a single image.")
group_by_prompt = st.checkbox("Group results by prompt", value=st.session_state['defaults'].img2img.group_by_prompt,
help="Saves all the images with the same prompt into the same folder. \
When using a prompt matrix each prompt combination will have its own folder.")
write_info_files = st.checkbox("Write Info file", value=st.session_state['defaults'].img2img.write_info_files,
help="Save a file next to the image with informartion about the generation.")
save_as_jpg = st.checkbox("Save samples as jpg", value=st.session_state['defaults'].img2img.save_as_jpg, help="Saves the images as jpg instead of png.")

#
# check if GFPGAN, RealESRGAN and LDSR are available.
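The hunk above replaces the img2img batch sliders with free-form text inputs wrapped in `int(...)` and stored in `st.session_state`. One thing worth noting is that `int()` raises `ValueError` on non-numeric input; a defensive wrapper along the following lines is one possible hardening. The helper below is hypothetical and not part of this commit, which calls `int(st.text_input(...))` directly.

```python
import streamlit as st

def int_input(label: str, default: int, **kwargs) -> int:
    """Hypothetical helper: a text input that falls back to a default when the
    entered value is not a whole number."""
    raw = st.text_input(label, value=str(default), **kwargs)
    try:
        return int(raw)
    except ValueError:
        st.warning(f"'{label}' expects a whole number; falling back to {default}.")
        return default

# Usage in the same spirit as the diff:
# st.session_state["batch_count"] = int_input(
#     "Batch count.", st.session_state["defaults"].img2img.batch_count.value,
#     help="How many iterations or batches of images to generate in total.")
```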

@@ -656,7 +652,7 @@ def layout():
try:
output_images, seed, info, stats = img2img(prompt=prompt, init_info=new_img, init_info_mask=new_mask, mask_mode=mask_mode,
mask_restore=img2img_mask_restore, ddim_steps=st.session_state["sampling_steps"],
sampler_name=st.session_state["sampler_name"], n_iter=batch_count,
sampler_name=st.session_state["sampler_name"], n_iter=st.session_state["batch_count"],
cfg_scale=cfg_scale, denoising_strength=st.session_state["denoising_strength"], variant_seed=variant_seed,
seed=seed, noise_mode=noise_mode, find_noise_steps=find_noise_steps, width=width,
height=height, variant_amount=variant_amount,

@@ -2249,7 +2249,7 @@ def process_images(

st.session_state["preview_image"].image(image)

if use_GFPGAN and server_state["GFPGAN"] is not None and not use_RealESRGAN:
if use_GFPGAN and server_state["GFPGAN"] is not None and not use_RealESRGAN and not use_LDSR:
st.session_state["progress_bar_text"].text("Running GFPGAN on image %d of %d..." % (i+1, len(x_samples_ddim)))

torch_gc()
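The condition change in the hunk above, adding `and not use_LDSR` to the GFPGAN-only branch, is the core of the LDSR fix: previously that branch also matched runs that requested LDSR, so the LDSR paths further down the `if`/`elif` chain never executed. A rough sketch of the corrected branch selection (flag names follow the diff; the function itself is illustrative and omits the model-availability checks, it is not the repo's code):

```python
def pick_postprocess_branch(use_GFPGAN: bool, use_RealESRGAN: bool, use_LDSR: bool) -> str:
    """Illustrative only: mirrors the branch *selection* in process_images() after
    this commit, returning which post-processing path would run."""
    if use_GFPGAN and not use_RealESRGAN and not use_LDSR:
        return "gfpgan"              # face restoration only
    elif use_RealESRGAN and not use_GFPGAN:
        return "realesrgan"          # RealESRGAN upscale only
    elif use_LDSR and not use_GFPGAN:
        return "ldsr"                # LDSR upscale only
    elif use_LDSR and use_GFPGAN:
        return "gfpgan+ldsr"         # combined branch added by this commit
    elif use_RealESRGAN and use_GFPGAN:
        return "gfpgan+realesrgan"
    return "none"                    # no post-processing requested

# Before this commit the first condition lacked the `not use_LDSR` guard, so a run
# with use_GFPGAN=True and use_LDSR=True stopped at the plain GFPGAN branch and LDSR
# never ran; with the guard it now reaches the combined "gfpgan+ldsr" branch.
```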

@@ -2275,31 +2275,6 @@ def process_images(
grid_captions.append( captions[i] + "\ngfpgan" )

#
elif use_GFPGAN and server_state["GFPGAN"] is not None and not use_LDSR:
st.session_state["progress_bar_text"].text("Running GFPGAN on image %d of %d..." % (i+1, len(x_samples_ddim)))

torch_gc()
cropped_faces, restored_faces, restored_img = server_state["GFPGAN"].enhance(x_sample[:,:,::-1], has_aligned=False, only_center_face=False, paste_back=True)

gfpgan_sample = restored_img[:,:,::-1]
gfpgan_image = Image.fromarray(gfpgan_sample)

#if st.session_state["GFPGAN_strenght"]:
#gfpgan_sample = Image.blend(image, gfpgan_image, st.session_state["GFPGAN_strenght"])

gfpgan_filename = original_filename + '-gfpgan'

save_sample(gfpgan_image, sample_path_i, gfpgan_filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback,
uses_random_seed_loopback, save_grid, sort_samples, sampler_name, ddim_eta,
n_iter, batch_size, i, denoising_strength, resize_mode, False, server_state["loaded_model"])

output_images.append(gfpgan_image) #287
run_images.append(gfpgan_image)

if simple_templating:
grid_captions.append( captions[i] + "\ngfpgan" )

elif use_RealESRGAN and server_state["RealESRGAN"] is not None and not use_GFPGAN:
st.session_state["progress_bar_text"].text("Running RealESRGAN on image %d of %d..." % (i+1, len(x_samples_ddim)))
#skip_save = True # #287 >_>

@@ -2341,14 +2316,14 @@ def process_images(

result = server_state["LDSR"].superResolution(image, 2, 2, 2)
ldsr_filename = original_filename + '-ldsr4x'
ldsr_sample = result[:,:,::-1]
ldsr_image = Image.fromarray(ldsr_sample)
#ldsr_sample = result[:,:,::-1]
#ldsr_image = Image.fromarray(ldsr_sample)

#save_sample(image, sample_path_i, original_filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
#normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
#save_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode)

save_sample(esrgan_image, sample_path_i, ldsr_filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
save_sample(result, sample_path_i, ldsr_filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback,
save_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode, False, server_state["loaded_model"])

@@ -2356,7 +2331,37 @@ def process_images(
run_images.append(ldsr_image)

if simple_templating:
grid_captions.append( captions[i] + "\nldsr" )
grid_captions.append( captions[i] + "\nldsr" )

#
elif use_LDSR and server_state["LDSR"] is not None and use_GFPGAN:
print ("Running GFPGAN+LDSR on image %d of %d..." % (i+1, len(x_samples_ddim)))
st.session_state["progress_bar_text"].text("Running GFPGAN+LDSR on image %d of %d..." % (i+1, len(x_samples_ddim)))
#skip_save = True # #287 >_>
torch_gc()

if server_state["LDSR"].name != LDSR_model_name:
#try_loading_RealESRGAN(realesrgan_model_name)
load_models(use_LDSR=use_LDSR, LDSR_model=LDSR_model_name, use_GFPGAN=use_GFPGAN, use_RealESRGAN=use_RealESRGAN, RealESRGAN_model=realesrgan_model_name)

result = server_state["LDSR"].superResolution(image, 2, 2, 2)
ldsr_filename = original_filename + '-gfpgan-ldsr2x'
#ldsr_sample = result[:,:,::-1]
#ldsr_image = Image.fromarray(result)

#save_sample(image, sample_path_i, original_filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
#normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback, skip_save,
#save_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode)

save_sample(result, sample_path_i, ldsr_filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
normalize_prompt_weights, use_GFPGAN, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback,
save_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode, False, server_state["loaded_model"])

output_images.append(result) #287
run_images.append(result)

if simple_templating:
grid_captions.append( captions[i] + "\ngfpgan-ldsr" )

elif use_RealESRGAN and server_state["RealESRGAN"] is not None and use_GFPGAN and server_state["GFPGAN"] is not None:
st.session_state["progress_bar_text"].text("Running GFPGAN+RealESRGAN on image %d of %d..." % (i+1, len(x_samples_ddim)))

@@ -2385,32 +2390,7 @@ def process_images(
grid_captions.append( captions[i] + "\ngfpgan_esrgan" )

#
elif use_LDSR and server_state["LDSR"] is not None and use_GFPGAN and server_state["GFPGAN"] is not None:
st.session_state["progress_bar_text"].text("Running GFPGAN+LDSR on image %d of %d..." % (i+1, len(x_samples_ddim)))
#skip_save = True # #287 >_>
torch_gc()
cropped_faces, restored_faces, restored_img = server_state["LDSR"].enhance(x_sample[:,:,::-1], has_aligned=False, only_center_face=False, paste_back=True)
gfpgan_sample = restored_img[:,:,::-1]

if server_state["LDSR"].model.name != ldsr_model_name:
#try_loading_RealESRGAN(realesrgan_model_name)
load_models(use_LDSR=use_LDSR, LDSR_model=LDSR_model_name,use_GFPGAN=use_GFPGAN, use_RealESRGAN=use_RealESRGAN, RealESRGAN_model=realesrgan_model_name)

output, img_mode = server_state["LDSR"].enhance(gfpgan_sample[:,:,::-1])
gfpgan_ldsr_filename = original_filename + '-gfpgan-ldsr4x'
gfpgan_ldsr_sample = output[:,:,::-1]
gfpgan_ldsr_image = Image.fromarray(gfpgan_ldsr_sample)

save_sample(gfpgan_ldsr_image, sample_path_i, gfpgan_ldsr_filename, jpg_sample, prompts, seeds, width, height, steps, cfg_scale,
normalize_prompt_weights, False, write_info_files, prompt_matrix, init_img, uses_loopback, uses_random_seed_loopback,
save_grid, sort_samples, sampler_name, ddim_eta, n_iter, batch_size, i, denoising_strength, resize_mode, False, server_state["loaded_model"])

output_images.append(gfpgan_ldsr_image) #287
run_images.append(gfpgan_ldsr_image)

if simple_templating:
grid_captions.append( captions[i] + "\ngfpgan_ldsr" )

else:
output_images.append(image)
run_images.append(image)

@@ -200,25 +200,30 @@ def layout():
seed = st.text_input("Seed:", value=st.session_state['defaults'].txt2img.seed, help=" The seed to use, if left blank a random seed will be generated.")

with st.expander("Batch Options"):
batch_count = st.slider("Batch count.", min_value=st.session_state['defaults'].txt2img.batch_count.min_value, max_value=st.session_state['defaults'].txt2img.batch_count.max_value,
value=st.session_state['defaults'].txt2img.batch_count.value, step=st.session_state['defaults'].txt2img.batch_count.step,
help="How many iterations or batches of images to generate in total.")
#batch_count = st.slider("Batch count.", min_value=st.session_state['defaults'].txt2img.batch_count.min_value, max_value=st.session_state['defaults'].txt2img.batch_count.max_value,
#value=st.session_state['defaults'].txt2img.batch_count.value, step=st.session_state['defaults'].txt2img.batch_count.step,
#help="How many iterations or batches of images to generate in total.")

batch_size = st.slider("Batch size", min_value=st.session_state['defaults'].txt2img.batch_size.min_value, max_value=st.session_state['defaults'].txt2img.batch_size.max_value,
value=st.session_state.defaults.txt2img.batch_size.value, step=st.session_state.defaults.txt2img.batch_size.step,
help="How many images are at once in a batch.\
It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it takes to finish generation as more images are generated at once.\
Default: 1")
#batch_size = st.slider("Batch size", min_value=st.session_state['defaults'].txt2img.batch_size.min_value, max_value=st.session_state['defaults'].txt2img.batch_size.max_value,
#value=st.session_state.defaults.txt2img.batch_size.value, step=st.session_state.defaults.txt2img.batch_size.step,
#help="How many images are at once in a batch.\
#It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it takes to finish generation as more images are generated at once.\
#Default: 1")

st.session_state["batch_count"] = int(st.text_input("Batch count.", value=st.session_state['defaults'].txt2img.batch_count.value,
help="How many iterations or batches of images to generate in total."))

st.session_state["batch_size"] = int(st.text_input("Batch size", value=st.session_state.defaults.txt2img.batch_size.value,
help="How many images are at once in a batch.\
It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it takes to finish generation as more images are generated at once.\
Default: 1") )

with st.expander("Preview Settings"):
st.session_state["update_preview"] = st.checkbox("Update Image Preview", value=st.session_state['defaults'].txt2img.update_preview,
help="If enabled the image preview will be updated during the generation instead of at the end. \
You can use the Update Preview \Frequency option bellow to customize how frequent it's updated. \
By default this is enabled and the frequency is set to 1 step.")

st.session_state["update_preview"] = st.session_state["defaults"].general.update_preview
st.session_state["update_preview_frequency"] = st.text_input("Update Image Preview Frequency", value=st.session_state['defaults'].txt2img.update_preview_frequency,
help="Frequency in steps at which the the preview image is updated. By default the frequency \
is set to 1 step.")
is set to 10 step.")

with col2:
preview_tab, gallery_tab = st.tabs(["Preview", "Gallery"])

@@ -268,23 +273,24 @@ def layout():
index=sampler_name_list.index(st.session_state['defaults'].txt2img.default_sampler), help="Sampling method to use. Default: k_euler")

with st.expander("Advanced"):
separate_prompts = st.checkbox("Create Prompt Matrix.", value=st.session_state['defaults'].txt2img.separate_prompts,
help="Separate multiple prompts using the `|` character, and get all combinations of them.")

normalize_prompt_weights = st.checkbox("Normalize Prompt Weights.", value=st.session_state['defaults'].txt2img.normalize_prompt_weights,
help="Ensure the sum of all weights add up to 1.0")

save_individual_images = st.checkbox("Save individual images.", value=st.session_state['defaults'].txt2img.save_individual_images,
help="Save each image generated before any filter or enhancement is applied.")

save_grid = st.checkbox("Save grid",value=st.session_state['defaults'].txt2img.save_grid, help="Save a grid with all the images generated into a single image.")
group_by_prompt = st.checkbox("Group results by prompt", value=st.session_state['defaults'].txt2img.group_by_prompt,
help="Saves all the images with the same prompt into the same folder. When using a prompt matrix each prompt combination will have its own folder.")

write_info_files = st.checkbox("Write Info file", value=st.session_state['defaults'].txt2img.write_info_files,
help="Save a file next to the image with informartion about the generation.")

save_as_jpg = st.checkbox("Save samples as jpg", value=st.session_state['defaults'].txt2img.save_as_jpg, help="Saves the images as jpg instead of png.")
with st.expander("Output Settings"):
separate_prompts = st.checkbox("Create Prompt Matrix.", value=st.session_state['defaults'].txt2img.separate_prompts,
help="Separate multiple prompts using the `|` character, and get all combinations of them.")

normalize_prompt_weights = st.checkbox("Normalize Prompt Weights.", value=st.session_state['defaults'].txt2img.normalize_prompt_weights,
help="Ensure the sum of all weights add up to 1.0")

save_individual_images = st.checkbox("Save individual images.", value=st.session_state['defaults'].txt2img.save_individual_images,
help="Save each image generated before any filter or enhancement is applied.")

save_grid = st.checkbox("Save grid",value=st.session_state['defaults'].txt2img.save_grid, help="Save a grid with all the images generated into a single image.")
group_by_prompt = st.checkbox("Group results by prompt", value=st.session_state['defaults'].txt2img.group_by_prompt,
help="Saves all the images with the same prompt into the same folder. When using a prompt matrix each prompt combination will have its own folder.")

write_info_files = st.checkbox("Write Info file", value=st.session_state['defaults'].txt2img.write_info_files,
help="Save a file next to the image with informartion about the generation.")

save_as_jpg = st.checkbox("Save samples as jpg", value=st.session_state['defaults'].txt2img.save_as_jpg, help="Saves the images as jpg instead of png.")

# check if GFPGAN, RealESRGAN and LDSR are available.
if "GFPGAN_available" not in st.session_state:

@@ -390,7 +396,7 @@ def layout():
#try:
#

output_images, seeds, info, stats = txt2img(prompt, st.session_state.sampling_steps, sampler_name, batch_count, batch_size,
output_images, seeds, info, stats = txt2img(prompt, st.session_state.sampling_steps, sampler_name, st.session_state["batch_count"], st.session_state["batch_size"],
cfg_scale, seed, height, width, separate_prompts, normalize_prompt_weights, save_individual_images,
save_grid, group_by_prompt, save_as_jpg, st.session_state["use_GFPGAN"], st.session_state['GFPGAN_model'],
use_RealESRGAN=st.session_state["use_RealESRGAN"], RealESRGAN_model=st.session_state["RealESRGAN_model"],

@@ -613,7 +613,8 @@ def layout():
#uploaded_images = st.file_uploader("Upload Image", accept_multiple_files=False, type=["png", "jpg", "jpeg", "webp"],
#help="Upload an image which will be used for the image to image generation.")
seed = st.text_input("Seed:", value=st.session_state['defaults'].txt2vid.seed, help=" The seed to use, if left blank a random seed will be generated.")
#batch_count = st.slider("Batch count.", min_value=1, max_value=100, value=st.session_state['defaults'].txt2vid.batch_count, step=1, help="How many iterations or batches of images to generate in total.")
#batch_count = st.slider("Batch count.", min_value=1, max_value=100, value=st.session_state['defaults'].txt2vid.batch_count,
# step=1, help="How many iterations or batches of images to generate in total.")
#batch_size = st.slider("Batch size", min_value=1, max_value=250, value=st.session_state['defaults'].txt2vid.batch_size, step=1,
#help="How many images are at once in a batch.\
#It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it takes to finish generation as more images are generated at once.\

@@ -622,11 +623,12 @@ def layout():
st.session_state["max_frames"] = int(st.text_input("Max Frames:", value=st.session_state['defaults'].txt2vid.max_frames, help="Specify the max number of frames you want to generate."))

with st.expander("Preview Settings"):
st.session_state["update_preview"] = st.checkbox("Update Image Preview", value=st.session_state['defaults'].txt2vid.update_preview,
help="If enabled the image preview will be updated during the generation instead of at the end. \
You can use the Update Preview \Frequency option bellow to customize how frequent it's updated. \
By default this is enabled and the frequency is set to 1 step.")
#st.session_state["update_preview"] = st.checkbox("Update Image Preview", value=st.session_state['defaults'].txt2vid.update_preview,
#help="If enabled the image preview will be updated during the generation instead of at the end. \
#You can use the Update Preview \Frequency option bellow to customize how frequent it's updated. \
#By default this is enabled and the frequency is set to 1 step.")

st.session_state["update_preview"] = st.session_state["defaults"].general.update_preview
st.session_state["update_preview_frequency"] = st.text_input("Update Image Preview Frequency", value=st.session_state['defaults'].txt2vid.update_preview_frequency,
help="Frequency in steps at which the the preview image is updated. By default the frequency \
is set to 1 step.")

@@ -710,26 +712,27 @@ def layout():
#help="Press the Enter key to summit, when 'No' is selected you can use the Enter key to write multiple lines.")

with st.expander("Advanced"):
st.session_state["separate_prompts"] = st.checkbox("Create Prompt Matrix.", value=st.session_state['defaults'].txt2vid.separate_prompts,
help="Separate multiple prompts using the `|` character, and get all combinations of them.")
st.session_state["normalize_prompt_weights"] = st.checkbox("Normalize Prompt Weights.",
value=st.session_state['defaults'].txt2vid.normalize_prompt_weights, help="Ensure the sum of all weights add up to 1.0")
st.session_state["save_individual_images"] = st.checkbox("Save individual images.",
value=st.session_state['defaults'].txt2vid.save_individual_images,
help="Save each image generated before any filter or enhancement is applied.")
st.session_state["save_video"] = st.checkbox("Save video",value=st.session_state['defaults'].txt2vid.save_video,
help="Save a video with all the images generated as frames at the end of the generation.")

st.session_state["group_by_prompt"] = st.checkbox("Group results by prompt", value=st.session_state['defaults'].txt2vid.group_by_prompt,
help="Saves all the images with the same prompt into the same folder. When using a prompt matrix each prompt combination will have its own folder.")
st.session_state["write_info_files"] = st.checkbox("Write Info file", value=st.session_state['defaults'].txt2vid.write_info_files,
help="Save a file next to the image with informartion about the generation.")
st.session_state["dynamic_preview_frequency"] = st.checkbox("Dynamic Preview Frequency", value=st.session_state['defaults'].txt2vid.dynamic_preview_frequency,
help="This option tries to find the best value at which we can update \
the preview image during generation while minimizing the impact it has in performance. Default: True")
st.session_state["do_loop"] = st.checkbox("Do Loop", value=st.session_state['defaults'].txt2vid.do_loop,
help="Do loop")
st.session_state["save_as_jpg"] = st.checkbox("Save samples as jpg", value=st.session_state['defaults'].txt2vid.save_as_jpg, help="Saves the images as jpg instead of png.")
with st.expander("Output Settings"):
st.session_state["separate_prompts"] = st.checkbox("Create Prompt Matrix.", value=st.session_state['defaults'].txt2vid.separate_prompts,
help="Separate multiple prompts using the `|` character, and get all combinations of them.")
st.session_state["normalize_prompt_weights"] = st.checkbox("Normalize Prompt Weights.",
value=st.session_state['defaults'].txt2vid.normalize_prompt_weights, help="Ensure the sum of all weights add up to 1.0")
st.session_state["save_individual_images"] = st.checkbox("Save individual images.",
value=st.session_state['defaults'].txt2vid.save_individual_images,
help="Save each image generated before any filter or enhancement is applied.")
st.session_state["save_video"] = st.checkbox("Save video",value=st.session_state['defaults'].txt2vid.save_video,
help="Save a video with all the images generated as frames at the end of the generation.")

st.session_state["group_by_prompt"] = st.checkbox("Group results by prompt", value=st.session_state['defaults'].txt2vid.group_by_prompt,
help="Saves all the images with the same prompt into the same folder. When using a prompt matrix each prompt combination will have its own folder.")
st.session_state["write_info_files"] = st.checkbox("Write Info file", value=st.session_state['defaults'].txt2vid.write_info_files,
help="Save a file next to the image with informartion about the generation.")
st.session_state["dynamic_preview_frequency"] = st.checkbox("Dynamic Preview Frequency", value=st.session_state['defaults'].txt2vid.dynamic_preview_frequency,
help="This option tries to find the best value at which we can update \
the preview image during generation while minimizing the impact it has in performance. Default: True")
st.session_state["do_loop"] = st.checkbox("Do Loop", value=st.session_state['defaults'].txt2vid.do_loop,
help="Do loop")
st.session_state["save_as_jpg"] = st.checkbox("Save samples as jpg", value=st.session_state['defaults'].txt2vid.save_as_jpg, help="Saves the images as jpg instead of png.")

if server_state["GFPGAN_available"]:
st.session_state["use_GFPGAN"] = st.checkbox("Use GFPGAN", value=st.session_state['defaults'].txt2vid.use_GFPGAN,