Changed CFG scale to be a number_input instead of a slider.

ZeroCool940711 2022-10-17 18:01:19 -07:00
parent 27c13fb625
commit 774073c8c3
5 changed files with 599 additions and 617 deletions
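
For reference, a minimal standalone sketch of the widget change these diffs apply (hypothetical snippet using the txt2img defaults from the config below; the actual code reads these values from st.session_state['defaults']):

    import streamlit as st

    # Old behaviour: a bounded slider, which required a max_value (30.0 in the config).
    # cfg_scale = st.slider("CFG (Classifier Free Guidance Scale):",
    #                       min_value=1.0, max_value=30.0, value=7.5, step=0.5,
    #                       help="How strongly the image should follow the prompt.")

    # New behaviour: a number_input with the same minimum, default value and step but
    # no upper bound, which is why the max_value entries are removed from the config.
    cfg_scale = st.number_input("CFG (Classifier Free Guidance Scale):",
                                min_value=1.0, value=7.5, step=0.5,
                                help="How strongly the image should follow the prompt.")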


@@ -38,6 +38,7 @@ general:
upscaling_method: "RealESRGAN"
outdir_txt2img: outputs/txt2img
outdir_img2img: outputs/img2img
outdir_img2txt: outputs/img2txt
gfpgan_cpu: False
esrgan_cpu: False
extra_models_cpu: False
@@ -83,7 +84,6 @@ txt2img:
cfg_scale:
value: 7.5
min_value: 1.0
max_value: 30.0
step: 0.5
seed: ""
@@ -148,7 +148,6 @@ txt2vid:
cfg_scale:
value: 7.5
min_value: 1.0
max_value: 30.0
step: 0.5
batch_count:
@@ -254,7 +253,6 @@ img2img:
cfg_scale:
value: 7.5
min_value: 1.0
max_value: 30.0
step: 0.5
batch_count:
@@ -277,9 +275,8 @@ img2img:
find_noise_steps:
value: 100
min_value: 0
max_value: 500
step: 10
min_value: 100
step: 100
LDSR_config:
sampling_steps: 50


@@ -18,15 +18,16 @@ from sd_utils import *
# streamlit imports
#streamlit components section
# streamlit components section
import streamlit_nested_layout
from streamlit_server_state import server_state, server_state_lock
#other imports
# other imports
from omegaconf import OmegaConf
# end of imports
#---------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------
def layout():
st.header("Settings")
@@ -47,7 +48,6 @@ def layout():
device_list.append(f"{id}: {name} ({human_readable_size(total_memory, decimal_places=0)})")
with col1:
st.title("General")
st.session_state['defaults'].general.gpu = int(st.selectbox("GPU", device_list,
@@ -93,7 +93,8 @@ def layout():
index=RealESRGAN_model_list.index(st.session_state['defaults'].general.RealESRGAN_model),
help="Default RealESRGAN model. Default: 'RealESRGAN_x4plus'")
Upscaler_list = ["RealESRGAN", "LDSR"]
st.session_state['defaults'].general.upscaling_method = st.selectbox("Upscaler", Upscaler_list, index=Upscaler_list.index(st.session_state['defaults'].general.upscaling_method), help="Default upscaling method. Default: 'RealESRGAN'")
st.session_state['defaults'].general.upscaling_method = st.selectbox("Upscaler", Upscaler_list, index=Upscaler_list.index(
st.session_state['defaults'].general.upscaling_method), help="Default upscaling method. Default: 'RealESRGAN'")
with col2:
st.title("Performance")
@@ -125,7 +126,7 @@ def layout():
st.session_state["defaults"].general.use_float16 = st.checkbox("Use float16", value=st.session_state['defaults'].general.use_float16,
help="Switch the model to 16-bit floats. Default: False")
precision_list = ['full','autocast']
precision_list = ['full', 'autocast']
st.session_state["defaults"].general.precision = st.selectbox("Precision", precision_list, index=precision_list.index(st.session_state['defaults'].general.precision),
help="Evaluates at this precision. Default: autocast")
@@ -151,11 +152,11 @@ def layout():
(Not properly implemented and currently not working, check this \
link 'https://github.com/huggingface/diffusers/pull/537' for more information on it ). Default: False")
#st.session_state["defaults"].general.update_preview = st.checkbox("Update Preview Image", value=st.session_state['defaults'].general.update_preview,
#help="Enables the preview image to be updated and shown to the user on the UI during the generation.\
#If checked, once you save the settings an option to specify the frequency at which the image is updated\
#in steps will be shown, this is helpful to reduce the negative effect this option has on performance. \
#Default: True")
# st.session_state["defaults"].general.update_preview = st.checkbox("Update Preview Image", value=st.session_state['defaults'].general.update_preview,
# help="Enables the preview image to be updated and shown to the user on the UI during the generation.\
# If checked, once you save the settings an option to specify the frequency at which the image is updated\
# in steps will be shown, this is helpful to reduce the negative effect this option has on performance. \
# Default: True")
st.session_state["defaults"].general.update_preview = True
st.session_state["defaults"].general.update_preview_frequency = st.number_input("Update Preview Frequency",
min_value=1,
@@ -181,7 +182,7 @@ def layout():
st.session_state["defaults"].general.save_metadata = st.checkbox("Save Metadata", value=st.session_state['defaults'].general.save_metadata,
help="Save metadata on the output image. Default: True")
save_format_list = ["png"]
st.session_state["defaults"].general.save_format = st.selectbox("Save Format",save_format_list, index=save_format_list.index(st.session_state['defaults'].general.save_format),
st.session_state["defaults"].general.save_format = st.selectbox("Save Format", save_format_list, index=save_format_list.index(st.session_state['defaults'].general.save_format),
help="Format that will be used whens saving the output images. Default: 'png'")
st.session_state["defaults"].general.skip_grid = st.checkbox("Skip Grid", value=st.session_state['defaults'].general.skip_grid,
@@ -202,8 +203,6 @@ def layout():
st.session_state["defaults"].daisi_app.running_on_daisi_io = st.checkbox("Running on Daisi.io?", value=st.session_state['defaults'].daisi_app.running_on_daisi_io,
help="Specify if we are running on app.Daisi.io . Default: False")
with col4:
st.title("Streamlit Config")
@@ -258,10 +257,6 @@ def layout():
st.session_state["defaults"].txt2img.cfg_scale.min_value = st.number_input("Minimum CFG Scale Value", value=st.session_state['defaults'].txt2img.cfg_scale.min_value,
help="Set the default minimum value for the CFG scale slider. Default is: 1")
st.session_state["defaults"].txt2img.cfg_scale.max_value = st.number_input("Maximum CFG Scale Value",
value=st.session_state['defaults'].txt2img.cfg_scale.max_value,
help="Set the default maximum value for the CFG scale slider. Default is: 30")
st.session_state["defaults"].txt2img.cfg_scale.step = st.number_input("CFG Slider Steps", value=st.session_state['defaults'].txt2img.cfg_scale.step,
help="Set the default value for the number of steps on the CFG scale slider. Default is: 0.5")
# Sampling Steps
@@ -291,7 +286,8 @@ def layout():
default_sampler_list = ["k_lms", "k_euler", "k_euler_a", "k_dpm_2", "k_dpm_2_a", "k_heun", "PLMS", "DDIM"]
st.session_state["defaults"].txt2img.default_sampler = st.selectbox("Default Sampler",
default_sampler_list, index=default_sampler_list.index(st.session_state['defaults'].txt2img.default_sampler),
default_sampler_list, index=default_sampler_list.index(
st.session_state['defaults'].txt2img.default_sampler),
help="Defaut sampler to use for txt2img. Default: k_euler")
st.session_state['defaults'].txt2img.seed = st.text_input("Default Seed", value=st.session_state['defaults'].txt2img.seed, help="Default seed.")
@@ -321,7 +317,8 @@ def layout():
st.session_state["defaults"].txt2img.write_info_files = st.checkbox("Write Info Files For Images", value=st.session_state['defaults'].txt2img.write_info_files,
help="Choose to write the info files along with the generated images. Default: True")
st.session_state["defaults"].txt2img.use_GFPGAN = st.checkbox("Use GFPGAN", value=st.session_state['defaults'].txt2img.use_GFPGAN, help="Choose to use GFPGAN. Default: False")
st.session_state["defaults"].txt2img.use_GFPGAN = st.checkbox(
"Use GFPGAN", value=st.session_state['defaults'].txt2img.use_GFPGAN, help="Choose to use GFPGAN. Default: False")
st.session_state["defaults"].txt2img.use_upscaling = st.checkbox("Use Upscaling", value=st.session_state['defaults'].txt2img.use_upscaling,
help="Choose to turn on upscaling by default. Default: False")
@@ -418,10 +415,6 @@ def layout():
value=st.session_state['defaults'].img2img.cfg_scale.min_value,
help="Set the default minimum value for the CFG scale slider. Default is: 1")
st.session_state["defaults"].img2img.cfg_scale.max_value = st.number_input("Maximum Img2Img CFG Scale Value",
value=st.session_state['defaults'].img2img.cfg_scale.max_value,
help="Set the default maximum value for the CFG scale slider. Default is: 30")
with col3:
st.session_state["defaults"].img2img.cfg_scale.step = st.number_input("Img2Img CFG Slider Steps",
value=st.session_state['defaults'].img2img.cfg_scale.step,
@@ -436,7 +429,6 @@ def layout():
value=st.session_state['defaults'].img2img.sampling_steps.min_value,
help="Set the default minimum value for the sampling steps slider. Default is: 1")
st.session_state["defaults"].img2img.sampling_steps.step = st.number_input("Img2Img Sampling Slider Steps",
value=st.session_state['defaults'].img2img.sampling_steps.step,
help="Set the default value for the number of steps on the sampling steps slider. Default is: 10")
@@ -478,14 +470,10 @@ def layout():
value=st.session_state['defaults'].img2img.find_noise_steps.min_value,
help="Set the default minimum value for the find noise steps slider. Default is: 0")
st.session_state["defaults"].img2img.find_noise_steps.max_value = st.number_input("Maximum Find Noise Steps",
value=st.session_state['defaults'].img2img.find_noise_steps.max_value,
help="Set the default maximum value for the find noise steps slider. Default is: 500")
st.session_state["defaults"].img2img.find_noise_steps.step = st.number_input("Find Noise Slider Steps",
value=st.session_state['defaults'].img2img.find_noise_steps.step,
help="Set the default value for the number of steps on the find noise steps slider. \
Default is: 10")
Default is: 100")
with col5:
st.title("General Parameters")
@@ -522,7 +510,8 @@ def layout():
value=st.session_state['defaults'].img2img.write_info_files,
help="Choose to write the info files along with the generated images. Default: True")
st.session_state["defaults"].img2img.use_GFPGAN = st.checkbox("Img2Img Use GFPGAN", value=st.session_state['defaults'].img2img.use_GFPGAN, help="Choose to use GFPGAN. Default: False")
st.session_state["defaults"].img2img.use_GFPGAN = st.checkbox(
"Img2Img Use GFPGAN", value=st.session_state['defaults'].img2img.use_GFPGAN, help="Choose to use GFPGAN. Default: False")
st.session_state["defaults"].img2img.use_RealESRGAN = st.checkbox("Img2Img Use RealESRGAN", value=st.session_state['defaults'].img2img.use_RealESRGAN,
help="Choose to use RealESRGAN. Default: False")
@@ -605,10 +594,6 @@ def layout():
value=st.session_state['defaults'].txt2vid.cfg_scale.min_value,
help="Set the default minimum value for the CFG scale slider. Default is: 1")
st.session_state["defaults"].txt2vid.cfg_scale.max_value = st.number_input("Maximum txt2vid CFG Scale Value",
value=st.session_state['defaults'].txt2vid.cfg_scale.max_value,
help="Set the default maximum value for the CFG scale slider. Default is: 30")
st.session_state["defaults"].txt2vid.cfg_scale.step = st.number_input("txt2vid CFG Slider Steps",
value=st.session_state['defaults'].txt2vid.cfg_scale.step,
help="Set the default value for the number of steps on the CFG scale slider. Default is: 0.5")
@@ -685,7 +670,7 @@ def layout():
st.session_state["defaults"].txt2vid.save_video = st.checkbox("Save Txt2Vid Video", value=st.session_state['defaults'].txt2vid.save_video,
help="Choose to save the Txt2Vid video. Default: True")
st.session_state["defaults"].txt2vid.save_video_on_stop = st.checkbox("Save video on Stop",value=st.session_state['defaults'].txt2vid.save_video_on_stop,
st.session_state["defaults"].txt2vid.save_video_on_stop = st.checkbox("Save video on Stop", value=st.session_state['defaults'].txt2vid.save_video_on_stop,
help="Save a video with all the images generated as frames when we hit the stop button \
during a generation.")
@@ -738,7 +723,6 @@ def layout():
st.session_state['defaults'].txt2vid.variant_seed = st.text_input("Default txt2vid Variation Seed",
value=st.session_state['defaults'].txt2vid.variant_seed, help="Default variation seed.")
with col5:
st.title("Beta Parameters")
@@ -817,7 +801,7 @@ def layout():
# We need a submit button to save the Settings
# as well as one to reset them to the defaults, just in case.
_, _, save_button_col, reset_button_col, _, _ = st.columns([1,1,1,1,1,1], gap="large")
_, _, save_button_col, reset_button_col, _, _ = st.columns([1, 1, 1, 1, 1, 1], gap="large")
with save_button_col:
save_button = st.form_submit_button("Save")


@@ -405,9 +405,9 @@ def layout():
value=st.session_state['defaults'].img2img.height.value, step=st.session_state['defaults'].img2img.height.step)
seed = st.text_input("Seed:", value=st.session_state['defaults'].img2img.seed, help=" The seed to use, if left blank a random seed will be generated.")
cfg_scale = st.slider("CFG (Classifier Free Guidance Scale):", min_value=st.session_state['defaults'].img2img.cfg_scale.min_value,
max_value=st.session_state['defaults'].img2img.cfg_scale.max_value, value=st.session_state['defaults'].img2img.cfg_scale.value,
step=st.session_state['defaults'].img2img.cfg_scale.step, help="How strongly the image should follow the prompt.")
cfg_scale = st.number_input("CFG (Classifier Free Guidance Scale):", min_value=st.session_state['defaults'].img2img.cfg_scale.min_value,
step=st.session_state['defaults'].img2img.cfg_scale.step,
help="How strongly the image should follow the prompt.")
st.session_state["denoising_strength"] = st.slider("Denoising Strength:", value=st.session_state['defaults'].img2img.denoising_strength.value,
min_value=st.session_state['defaults'].img2img.denoising_strength.min_value,
@@ -431,8 +431,8 @@ def layout():
help=""
)
noise_mode = noise_mode_list.index(noise_mode)
find_noise_steps = st.slider("Find Noise Steps", value=st.session_state['defaults'].img2img.find_noise_steps.value,
min_value=st.session_state['defaults'].img2img.find_noise_steps.min_value, max_value=st.session_state['defaults'].img2img.find_noise_steps.max_value,
find_noise_steps = st.number_input("Find Noise Steps", value=st.session_state['defaults'].img2img.find_noise_steps.value,
min_value=st.session_state['defaults'].img2img.find_noise_steps.min_value,
step=st.session_state['defaults'].img2img.find_noise_steps.step)
with st.expander("Batch Options"):


@@ -412,10 +412,10 @@ def layout():
value=st.session_state['defaults'].txt2img.width.value, step=st.session_state['defaults'].txt2img.width.step)
height = st.slider("Height:", min_value=st.session_state['defaults'].txt2img.height.min_value, max_value=st.session_state['defaults'].txt2img.height.max_value,
value=st.session_state['defaults'].txt2img.height.value, step=st.session_state['defaults'].txt2img.height.step)
cfg_scale = st.slider("CFG (Classifier Free Guidance Scale):", min_value=st.session_state['defaults'].txt2img.cfg_scale.min_value,
max_value=st.session_state['defaults'].txt2img.cfg_scale.max_value,
cfg_scale = st.number_input("CFG (Classifier Free Guidance Scale):", min_value=st.session_state['defaults'].txt2img.cfg_scale.min_value,
value=st.session_state['defaults'].txt2img.cfg_scale.value, step=st.session_state['defaults'].txt2img.cfg_scale.step,
help="How strongly the image should follow the prompt.")
seed = st.text_input("Seed:", value=st.session_state['defaults'].txt2img.seed, help=" The seed to use, if left blank a random seed will be generated.")
with st.expander("Batch Options"):


@@ -651,9 +651,10 @@ def layout():
value=st.session_state['defaults'].txt2vid.width.value, step=st.session_state['defaults'].txt2vid.width.step)
height = st.slider("Height:", min_value=st.session_state['defaults'].txt2vid.height.min_value, max_value=st.session_state['defaults'].txt2vid.height.max_value,
value=st.session_state['defaults'].txt2vid.height.value, step=st.session_state['defaults'].txt2vid.height.step)
cfg_scale = st.slider("CFG (Classifier Free Guidance Scale):", min_value=st.session_state['defaults'].txt2vid.cfg_scale.min_value,
max_value=st.session_state['defaults'].txt2vid.cfg_scale.max_value, value=st.session_state['defaults'].txt2vid.cfg_scale.value,
step=st.session_state['defaults'].txt2vid.cfg_scale.step, help="How strongly the image should follow the prompt.")
cfg_scale = st.number_input("CFG (Classifier Free Guidance Scale):", min_value=st.session_state['defaults'].txt2vid.cfg_scale.min_value,
value=st.session_state['defaults'].txt2vid.cfg_scale.value,
step=st.session_state['defaults'].txt2vid.cfg_scale.step,
help="How strongly the image should follow the prompt.")
#uploaded_images = st.file_uploader("Upload Image", accept_multiple_files=False, type=["png", "jpg", "jpeg", "webp"],
#help="Upload an image which will be used for the image to image generation.")