mirror of https://github.com/Sygil-Dev/sygil-webui.git (synced 2024-12-15 06:21:34 +03:00)

Fixed multiple settings and default values not working properly.

commit f866ffdad4, parent 393a695aef
@@ -29,11 +29,11 @@ general:
     default_model_path: "models/ldm/stable-diffusion-v1/model.ckpt"
     use_sd_concepts_library: True
     sd_concepts_library_folder: "models/custom/sd-concepts-library"
-    GFPGAN_dir: "./src/gfpgan"
+    GFPGAN_dir: "./models/gfpgan"
     GFPGAN_model: "GFPGANv1.4"
     LDSR_dir: "./models/ldsr"
     LDSR_model: "model"
-    RealESRGAN_dir: "./src/realesrgan"
+    RealESRGAN_dir: "./models/realesrgan"
     RealESRGAN_model: "RealESRGAN_x4plus"
     upscaling_method: "RealESRGAN"
     outdir_txt2img: outputs/txt2img
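
The two path changes above move the GFPGAN and RealESRGAN checkouts from ./src/ into ./models/. A minimal sketch of consuming such a setting, assuming the YAML is loaded with OmegaConf as elsewhere in the project; resolve_model_dir and the fallback to the legacy ./src locations are illustrative, not part of this commit:

# Hypothetical sketch (not the repo's actual loader): resolve a model directory
# from the config, falling back to the legacy ./src location if it is missing.
import os
from omegaconf import OmegaConf

defaults = OmegaConf.create({
    "general": {
        "GFPGAN_dir": "./models/gfpgan",
        "RealESRGAN_dir": "./models/realesrgan",
    }
})

def resolve_model_dir(configured: str, legacy: str) -> str:
    """Return the configured directory if it exists on disk, else the legacy one."""
    return configured if os.path.isdir(configured) else legacy

gfpgan_dir = resolve_model_dir(defaults.general.GFPGAN_dir, "./src/gfpgan")
realesrgan_dir = resolve_model_dir(defaults.general.RealESRGAN_dir, "./src/realesrgan")
print(gfpgan_dir, realesrgan_dir)
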
@@ -91,6 +91,9 @@ txt2img:
 
     sampling_steps:
         value: 30
+        min_value: 10
+        max_value: 250
+        step: 10
 
     LDSR_config:
         sampling_steps: 50
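
The three new keys give the sampling-steps setting an explicit range and increment. A hedged sketch of how keys like these would typically feed a Streamlit slider; the widget label and variable names are assumptions, not the app's exact code:

# Illustrative only: wire the new min/max/step keys into a slider.
import streamlit as st
from omegaconf import OmegaConf

defaults = OmegaConf.create({
    "txt2img": {
        "sampling_steps": {"value": 30, "min_value": 10, "max_value": 250, "step": 10}
    }
})

cfg = defaults.txt2img.sampling_steps
sampling_steps = st.slider(
    "Sampling Steps",
    min_value=cfg.min_value,  # 10
    max_value=cfg.max_value,  # 250
    value=cfg.value,          # 30
    step=cfg.step,            # 10
)
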
@@ -282,14 +282,14 @@ def layout():
         st.title("General Parameters")
 
         # Batch Count
-        st.session_state["batch_count"] = st.text_input("Batch count.", value=st.session_state['defaults'].txt2img.batch_count.value,
-                                                         help="How many iterations or batches of images to generate in total.")
+        st.session_state["defaults"].txt2img.batch_count.value = int(st.text_input("Batch count", value=st.session_state['defaults'].txt2img.batch_count.value,
+                                                                                    help="How many iterations or batches of images to generate in total."))
 
-        st.session_state["batch_size"] = st.text_input("Batch size", value=st.session_state.defaults.txt2img.batch_size.value,
+        st.session_state["defaults"].txt2img.batch_size.value = int(st.text_input("Batch size", value=st.session_state.defaults.txt2img.batch_size.value,
                                                         help="How many images are at once in a batch.\
                                                         It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it \
                                                         takes to finish generation as more images are generated at once.\
-                                                        Default: 1")
+                                                        Default: 1"))
 
         default_sampler_list = ["k_lms", "k_euler", "k_euler_a", "k_dpm_2", "k_dpm_2_a", "k_heun", "PLMS", "DDIM"]
         st.session_state["defaults"].txt2img.default_sampler = st.selectbox("Default Sampler",
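
This is the core of the fix: the parsed value is written back into st.session_state["defaults"], the same config tree the widget reads its default from, instead of a throwaway batch_count session key, and int() makes sure a number rather than a string is stored. A minimal sketch of the pattern, assuming the defaults are an OmegaConf config as the attribute access above suggests:

# Minimal sketch of the pattern the commit applies (names mirror the diff, the
# OmegaConf bootstrap below is only here to make the snippet self-contained).
import streamlit as st
from omegaconf import OmegaConf

if "defaults" not in st.session_state:
    st.session_state["defaults"] = OmegaConf.create(
        {"txt2img": {"batch_count": {"value": 1}, "batch_size": {"value": 1}}}
    )

# Before: the widget result went to a separate key, so the stored default never changed.
# st.session_state["batch_count"] = st.text_input("Batch count.", value=...)

# After: parse the text input and write it back into the defaults tree it came from.
st.session_state["defaults"].txt2img.batch_count.value = int(
    st.text_input("Batch count", value=st.session_state["defaults"].txt2img.batch_count.value)
)
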
@@ -446,14 +446,14 @@ def layout():
                                             help="Set the default value for the number of steps on the sampling steps slider. Default is: 10"))
 
         # Batch Count
-        st.session_state["batch_count"] = st.text_input("Batch count.", value=st.session_state['defaults'].txt2img.batch_count.value,
-                                                         help="How many iterations or batches of images to generate in total.")
+        st.session_state["defaults"].img2img.batch_count.value = int(st.text_input("Img2img Batch count", value=st.session_state["defaults"].img2img.batch_count.value,
+                                                                                    help="How many iterations or batches of images to generate in total."))
 
-        st.session_state["batch_size"] = st.text_input("Batch size", value=st.session_state.defaults.txt2img.batch_size.value,
+        st.session_state["defaults"].img2img.batch_size.value = int(st.text_input("Img2img Batch size", value=st.session_state["defaults"].img2img.batch_size.value,
                                                         help="How many images are at once in a batch.\
                                                         It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it \
                                                         takes to finish generation as more images are generated at once.\
-                                                        Default: 1")
+                                                        Default: 1"))
 
     with col4:
         # Inference Steps
         st.session_state["defaults"].img2img.num_inference_steps.value = int(st.text_input("Default Inference Steps",
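
The same batch count and batch size widgets are repeated for txt2img, img2img and txt2vid, which is how the img2img tab ended up with stale txt2img labels and values before this commit. A hypothetical helper (not present in the repository) that would remove that duplication:

# Hypothetical refactor sketch: one helper for the batch inputs each tab repeats.
import streamlit as st
from omegaconf import OmegaConf

def batch_inputs(section, label_prefix=""):
    """Render batch count/size text inputs and write the parsed ints back into `section`."""
    section.batch_count.value = int(
        st.text_input(label_prefix + "Batch count", value=section.batch_count.value,
                      help="How many iterations or batches of images to generate in total."))
    section.batch_size.value = int(
        st.text_input(label_prefix + "Batch size", value=section.batch_size.value,
                      help="How many images are generated at once in a single batch."))

if "defaults" not in st.session_state:
    st.session_state["defaults"] = OmegaConf.create(
        {"img2img": {"batch_count": {"value": 1}, "batch_size": {"value": 1}}})

batch_inputs(st.session_state["defaults"].img2img, label_prefix="Img2img ")
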
@@ -635,14 +635,14 @@ def layout():
                                             help="Set the default value for the number of steps on the sampling steps slider. Default is: 10"))
 
         # Batch Count
-        st.session_state["batch_count"] = st.text_input("Batch count.", value=st.session_state['defaults'].txt2img.batch_count.value,
-                                                         help="How many iterations or batches of images to generate in total.")
+        st.session_state["defaults"].txt2vid.batch_count.value = int(st.text_input("txt2vid Batch count", value=st.session_state['defaults'].txt2vid.batch_count.value,
+                                                                                    help="How many iterations or batches of images to generate in total."))
 
-        st.session_state["batch_size"] = st.text_input("Batch size", value=st.session_state.defaults.txt2img.batch_size.value,
+        st.session_state["defaults"].txt2vid.batch_size.value = int(st.text_input("txt2vid Batch size", value=st.session_state.defaults.txt2vid.batch_size.value,
                                                         help="How many images are at once in a batch.\
                                                         It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it \
                                                         takes to finish generation as more images are generated at once.\
-                                                        Default: 1")
+                                                        Default: 1") )
 
         # Inference Steps
         st.session_state["defaults"].txt2vid.num_inference_steps.value = int(st.text_input("Default Txt2Vid Inference Steps",
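
One caveat with the int(st.text_input(...)) pattern used throughout these hunks: int() raises ValueError if the user types something non-numeric. A small, purely illustrative guard; the warning text and fallback value are assumptions, not part of the commit:

# Illustrative only: keep the settings page from crashing on a non-numeric entry.
import streamlit as st

raw = st.text_input("txt2vid Batch count", value="1")
try:
    batch_count = int(raw)
except ValueError:
    st.warning("'" + raw + "' is not a whole number; keeping the previous value.")
    batch_count = 1  # fallback; real code would keep the stored default instead
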
@@ -214,9 +214,10 @@ def layout():
                                             help="How many iterations or batches of images to generate in total."))
 
         st.session_state["batch_size"] = int(st.text_input("Batch size", value=st.session_state.defaults.txt2img.batch_size.value,
                                              help="How many images are at once in a batch.\
-                                             It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it takes to finish generation as more images are generated at once.\
-                                             Default: 1") )
+                                             It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it takes \
+                                             to finish generation as more images are generated at once.\
+                                             Default: 1") )
 
         with st.expander("Preview Settings"):
 
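
The split above only shortens an over-long source line; note that backslash continuations inside a string literal keep each continuation line's leading spaces in the rendered help text. An illustrative alternative (not what the commit does) is implicit concatenation of adjacent string literals:

# Adjacent string literals are concatenated at compile time, with no stray
# indentation leaking into the tooltip text.
help_text = (
    "How many images are generated at once in a single batch. "
    "It increases VRAM usage a lot, but with enough VRAM it can reduce the time it takes "
    "to finish generation as more images are generated at once. Default: 1"
)
print(help_text)
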
@@ -336,8 +337,9 @@ def layout():
         if st.session_state["LDSR_available"]:
             upscaling_method_list.append("LDSR")
 
+        #print (st.session_state["RealESRGAN_available"])
         st.session_state["upscaling_method"] = st.selectbox("Upscaling Method", upscaling_method_list,
-                                                            index=upscaling_method_list.index(st.session_state['defaults'].general.upscaling_method))
+                                                            index=upscaling_method_list.index(str(st.session_state['defaults'].general.upscaling_method)))
 
         if st.session_state["RealESRGAN_available"]:
             with st.expander("RealESRGAN"):
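
list.index raises ValueError when the configured value is not found in the list, and the str() wrapper added above makes sure a plain string is compared against the plain-string list entries. A defensive sketch of the same lookup; the fallback to the first entry is an assumption, not the app's behaviour:

# Illustrative only: pick the default index for the upscaler selectbox safely.
upscaling_method_list = ["RealESRGAN", "LDSR"]
configured = "RealESRGAN"  # stand-in for st.session_state['defaults'].general.upscaling_method

try:
    default_index = upscaling_method_list.index(str(configured))
except ValueError:
    default_index = 0  # fall back to the first available upscaling method
print(default_index)
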
@@ -867,13 +867,13 @@ def layout():
         # run video generation
         video, seed, info, stats = txt2vid(prompts=prompt, gpu=st.session_state["defaults"].general.gpu,
                                            num_steps=st.session_state.sampling_steps, max_frames=int(st.session_state.max_frames),
                                            num_inference_steps=st.session_state.num_inference_steps,
                                            cfg_scale=cfg_scale,do_loop=st.session_state["do_loop"],
                                            seeds=seed, quality=100, eta=0.0, width=width,
                                            height=height, weights_path=custom_model, scheduler=scheduler_name,
                                            disable_tqdm=False, beta_start=st.session_state['defaults'].txt2vid.beta_start.value,
                                            beta_end=st.session_state['defaults'].txt2vid.beta_end.value,
                                            beta_schedule=beta_scheduler_type, starting_image=None)
 
         #message.success('Done!', icon="✅")
         message.success('Render Complete: ' + info + '; Stats: ' + stats, icon="✅")
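
The call above passes a long keyword-argument list straight through from the UI state. Purely as an illustration (the stub txt2vid and every value below are placeholders, not the project's function or settings), collecting the arguments in a dict and unpacking them can make a call site like this easier to scan and to log:

# Illustrative readability sketch with a placeholder txt2vid stub.
def txt2vid(**kwargs):
    return "video.mp4", kwargs.get("seeds"), "info", "stats"

txt2vid_args = dict(
    prompts="a corgi astronaut",  # example prompt, not from the commit
    gpu=0,
    num_steps=30,
    max_frames=100,
    num_inference_steps=50,
    cfg_scale=7.5,
    do_loop=False,
    seeds=42,
    quality=100,
    eta=0.0,
    width=512,
    height=512,
    disable_tqdm=False,
)
video, seed, info, stats = txt2vid(**txt2vid_args)
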