Corrected breaking issues introduced in #1136 to txt2img and made its state variables consistent with img2img.

Fixed a bug where switching models after running would not reload the newly selected model.
protoplm 2022-09-14 19:59:53 -04:00
parent 178e62292e
commit 4f7adcaf42
2 changed files with 87 additions and 87 deletions

Changed file 1 of 2:

@@ -164,7 +164,7 @@ def load_models(continue_prev_run = False, use_GFPGAN=False, use_RealESRGAN=False
             del st.session_state["RealESRGAN"]
     if "model" in st.session_state:
-        if "model" in st.session_state and st.session_state["custom_model"] == custom_model:
+        if "model" in st.session_state and st.session_state["loaded_model"] == custom_model:
            # TODO: check if the optimized mode was changed?
            print("Model already loaded")
@@ -188,6 +188,7 @@ def load_models(continue_prev_run = False, use_GFPGAN=False, use_RealESRGAN=False
         st.session_state.model = model
         st.session_state.modelCS = modelCS
         st.session_state.modelFS = modelFS
+        st.session_state.loaded_model = custom_model
         print("Model loaded.")
@@ -882,26 +883,21 @@ def slerp(device, t, v0:torch.Tensor, v1:torch.Tensor, DOT_THRESHOLD=0.9995):
     return v2
 #
-def optimize_update_preview_frequency(current_chunk_speed, previous_chunk_speed_list, update_preview_frequency, update_preview_frequency_list):
+def optimize_update_preview_frequency(current_chunk_speed, previous_chunk_speed, update_preview_frequency):
     """Find the optimal update_preview_frequency value maximizing
     performance while minimizing the time between updates."""
-    from statistics import mean
-    previous_chunk_avg_speed = mean(previous_chunk_speed_list)
-    previous_chunk_speed_list.append(current_chunk_speed)
-    current_chunk_avg_speed = mean(previous_chunk_speed_list)
-    if current_chunk_avg_speed >= previous_chunk_avg_speed:
+    if current_chunk_speed >= previous_chunk_speed:
         #print(f"{current_chunk_speed} >= {previous_chunk_speed}")
-        update_preview_frequency_list.append(update_preview_frequency + 1)
+        update_preview_frequency +=1
+        previous_chunk_speed = current_chunk_speed
     else:
         #print(f"{current_chunk_speed} <= {previous_chunk_speed}")
-        update_preview_frequency_list.append(update_preview_frequency - 1)
-    update_preview_frequency = round(mean(update_preview_frequency_list))
-    return current_chunk_speed, previous_chunk_speed_list, update_preview_frequency, update_preview_frequency_list
+        update_preview_frequency -=1
+        previous_chunk_speed = current_chunk_speed
+    return current_chunk_speed, previous_chunk_speed, update_preview_frequency

 def get_font(fontsize):
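The replacement optimize_update_preview_frequency above drops the running-average lists and simply compares the latest chunk speed with the previous one, nudging the frequency value by one step each call. A self-contained copy with a small driver loop, for illustration (the per-chunk speed values are made up):

def optimize_update_preview_frequency(current_chunk_speed, previous_chunk_speed, update_preview_frequency):
    """Find the optimal update_preview_frequency value maximizing
    performance while minimizing the time between updates."""
    if current_chunk_speed >= previous_chunk_speed:
        update_preview_frequency += 1   # nudge the preview interval up
    else:
        update_preview_frequency -= 1   # nudge the preview interval down
    previous_chunk_speed = current_chunk_speed
    return current_chunk_speed, previous_chunk_speed, update_preview_frequency

# Hypothetical per-chunk timings feeding the adaptive frequency.
previous_chunk_speed, update_preview_frequency = 0.0, 10
for chunk_speed in (1.2, 1.1, 1.4, 1.3, 1.5):
    _, previous_chunk_speed, update_preview_frequency = optimize_update_preview_frequency(
        chunk_speed, previous_chunk_speed, update_preview_frequency)
    print(update_preview_frequency)   # prints 11, 10, 11, 10, 11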

Changed file 2 of 2:

@@ -114,8 +114,8 @@ def txt2img(prompt: str, ddim_steps: int, sampler_name: str, realesrgan_model_name
                 width=width,
                 height=height,
                 prompt_matrix=separate_prompts,
-                use_GFPGAN=use_GFPGAN,
-                use_RealESRGAN=use_RealESRGAN,
+                use_GFPGAN=st.session_state["use_GFPGAN"],
+                use_RealESRGAN=st.session_state["use_RealESRGAN"],
                 realesrgan_model_name=realesrgan_model_name,
                 fp=fp,
                 ddim_eta=ddim_eta,
@@ -210,9 +210,9 @@ def layout():
         with col3:
             # If we have custom models available on the "models/custom"
             #folder then we show a menu to select which model we want to use, otherwise we use the main model for SD
-            #if CustomModel_available:
-            custom_model = st.selectbox("Custom Model:", st.session_state['defaults'].txt2vid.custom_models_list,
-                index=st.session_state['defaults'].txt2vid.custom_models_list.index(st.session_state['defaults'].txt2vid.default_model),
+            if st.session_state["CustomModel_available"]:
+                st.session_state["custom_model"] = st.selectbox("Custom Model:", st.session_state["custom_models"],
+                    index=st.session_state["custom_models"].index(st.session_state['defaults'].general.default_model),
                     help="Select the model you want to use. This option is only available if you have custom models \
                     on your 'models/custom' folder. The model name that will be shown here is the same as the name\
                     the file for the model has on said folder, it is recommended to give the .ckpt file a name that \
@@ -242,17 +242,20 @@ def layout():
             write_info_files = st.checkbox("Write Info file", value=True, help="Save a file next to the image with informartion about the generation.")
             save_as_jpg = st.checkbox("Save samples as jpg", value=False, help="Saves the images as jpg instead of png.")
-            if GFPGAN_available:
-                use_GFPGAN = st.checkbox("Use GFPGAN", value=st.session_state['defaults'].txt2img.use_GFPGAN, help="Uses the GFPGAN model to improve faces after the generation. This greatly improve the quality and consistency of faces but uses extra VRAM. Disable if you need the extra VRAM.")
+            if st.session_state["GFPGAN_available"]:
+                st.session_state["use_GFPGAN"] = st.checkbox("Use GFPGAN", value=st.session_state['defaults'].txt2img.use_GFPGAN, help="Uses the GFPGAN model to improve faces after the generation.\
+                    This greatly improve the quality and consistency of faces but uses extra VRAM. Disable if you need the extra VRAM.")
             else:
-                use_GFPGAN = False
+                st.session_state["use_GFPGAN"] = False
-            if RealESRGAN_available:
-                use_RealESRGAN = st.checkbox("Use RealESRGAN", value=st.session_state['defaults'].txt2img.use_RealESRGAN, help="Uses the RealESRGAN model to upscale the images after the generation. This greatly improve the quality and lets you have high resolution images but uses extra VRAM. Disable if you need the extra VRAM.")
-                RealESRGAN_model = st.selectbox("RealESRGAN model", ["RealESRGAN_x4plus", "RealESRGAN_x4plus_anime_6B"], index=0)
+            if st.session_state["RealESRGAN_available"]:
+                st.session_state["use_RealESRGAN"] = st.checkbox("Use RealESRGAN", value=st.session_state['defaults'].txt2img.use_RealESRGAN,
+                    help="Uses the RealESRGAN model to upscale the images after the generation.\
+                    This greatly improve the quality and lets you have high resolution images but uses extra VRAM. Disable if you need the extra VRAM.")
+                st.session_state["RealESRGAN_model"] = st.selectbox("RealESRGAN model", ["RealESRGAN_x4plus", "RealESRGAN_x4plus_anime_6B"], index=0)
             else:
-                use_RealESRGAN = False
-                RealESRGAN_model = "RealESRGAN_x4plus"
+                st.session_state["use_RealESRGAN"] = False
+                st.session_state["RealESRGAN_model"] = "RealESRGAN_x4plus"
             variant_amount = st.slider("Variant Amount:", value=st.session_state['defaults'].txt2img.variant_amount, min_value=0.0, max_value=1.0, step=0.01)
             variant_seed = st.text_input("Variant Seed:", value=st.session_state['defaults'].txt2img.seed, help="The seed to use when generating a variant, if left blank a random seed will be generated.")
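The pattern in the hunk above (and throughout this file) is to write widget values into st.session_state under fixed keys, so that txt2img() and load_models() read the same state the UI set instead of local variables inside layout(). A minimal, self-contained sketch of that pattern for the GFPGAN toggle; the availability flag and the default value are assumptions for illustration:

import streamlit as st

# The widget result goes into st.session_state so later calls can read the
# same key; a fixed fallback is stored when the feature is unavailable so
# downstream code never hits a missing key.
if st.session_state.get("GFPGAN_available", False):
    st.session_state["use_GFPGAN"] = st.checkbox(
        "Use GFPGAN", value=False,
        help="Improve faces after generation at the cost of extra VRAM.")
else:
    st.session_state["use_GFPGAN"] = False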
@@ -261,13 +264,14 @@ def layout():
         if generate_button:
             #print("Loading models")
             # load the models when we hit the generate button for the first time, it wont be loaded after that so dont worry.
-            load_models(False, use_GFPGAN, use_RealESRGAN, RealESRGAN_model)
+            load_models(False, st.session_state["use_GFPGAN"], st.session_state["use_RealESRGAN"], st.session_state["RealESRGAN_model"], st.session_state["CustomModel_available"],
+                        st.session_state["custom_model"])
             try:
-                output_images, seeds, info, stats = txt2img(prompt, st.session_state.sampling_steps, sampler_name, RealESRGAN_model, batch_count, batch_size,
+                output_images, seeds, info, stats = txt2img(prompt, st.session_state.sampling_steps, sampler_name, st.session_state["RealESRGAN_model"], batch_count, batch_size,
                     cfg_scale, seed, height, width, separate_prompts, normalize_prompt_weights, save_individual_images,
-                    save_grid, group_by_prompt, save_as_jpg, use_GFPGAN, use_RealESRGAN, RealESRGAN_model, fp=st.session_state.defaults.general.fp,
-                    variant_amount=variant_amount, variant_seed=variant_seed, write_info_files=write_info_files)
+                    save_grid, group_by_prompt, save_as_jpg, st.session_state["use_GFPGAN"], st.session_state["use_RealESRGAN"], st.session_state["RealESRGAN_model"],
+                    fp=st.session_state.defaults.general.fp, variant_amount=variant_amount, variant_seed=variant_seed, write_info_files=write_info_files)
                 message.success('Render Complete: ' + info + '; Stats: ' + stats, icon="")
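For orientation, a condensed, hypothetical view of the generate-button flow after this change: every flag that load_models() and txt2img() need is read from st.session_state at the call site, so the sidebar widgets, the loader, and the generation call all see the same values. The function and default names below are stand-ins, and the real calls take many more arguments:

import streamlit as st

def on_generate(prompt, sampling_steps, sampler_name):
    # Read the shared flags once; these keys are written by the sidebar widgets.
    use_gfpgan = st.session_state.get("use_GFPGAN", False)
    use_esrgan = st.session_state.get("use_RealESRGAN", False)
    esrgan_model = st.session_state.get("RealESRGAN_model", "RealESRGAN_x4plus")
    custom_model = st.session_state.get("custom_model", "default")

    # Stand-ins for load_models()/txt2img(); printed here instead of executed.
    print("load_models:", use_gfpgan, use_esrgan, esrgan_model, custom_model)
    print("txt2img:", prompt, sampling_steps, sampler_name, esrgan_model)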