Mirror of https://github.com/sd-webui/stable-diffusion-webui.git

Merge pull request #1139 from protoplm/dev: txt2img touch up + model switching
Commit f74d796431
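This change makes load_models() track which checkpoint is currently in memory via st.session_state["loaded_model"], so switching to a different custom model triggers a reload while re-running with the same model reuses the cached weights. On the txt2img page, the GFPGAN/RealESRGAN toggles, the RealESRGAN model choice and the custom model selection are moved from local variables into st.session_state, and the preview settings are read from flat session-state keys, so the UI widgets, load_models() and txt2img() all see the same values.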
@@ -164,7 +164,7 @@ def load_models(continue_prev_run = False, use_GFPGAN=False, use_RealESRGAN=Fals
             del st.session_state["RealESRGAN"]

     if "model" in st.session_state:
-        if "model" in st.session_state and st.session_state["custom_model"] == custom_model:
+        if "model" in st.session_state and st.session_state["loaded_model"] == custom_model:
             # TODO: check if the optimized mode was changed?
             print("Model already loaded")

@@ -174,6 +174,7 @@ def load_models(continue_prev_run = False, use_GFPGAN=False, use_RealESRGAN=Fals
             del st.session_state.model
             del st.session_state.modelCS
             del st.session_state.modelFS
+            del st.session_state.loaded_model
         except KeyError:
             pass

@@ -188,6 +189,7 @@ def load_models(continue_prev_run = False, use_GFPGAN=False, use_RealESRGAN=Fals
         st.session_state.model = model
         st.session_state.modelCS = modelCS
         st.session_state.modelFS = modelFS
+        st.session_state.loaded_model = custom_model

         print("Model loaded.")

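Taken together, the three load_models hunks above form the model-switching cache: the loaded checkpoint's name is kept in st.session_state["loaded_model"], stale model objects are deleted when a different checkpoint is requested, and the new name is recorded after loading. A minimal sketch of that pattern, where load_sd_model() is a hypothetical placeholder for the real loading code (which also caches modelCS/modelFS and handles GFPGAN/RealESRGAN):

import streamlit as st

def load_sd_model(model_name):
    # Hypothetical stand-in for the real checkpoint loading code.
    ...

def load_models(custom_model="Stable Diffusion v1.4"):
    # Reuse the cached model if the requested checkpoint is already loaded.
    if "model" in st.session_state and st.session_state["loaded_model"] == custom_model:
        print("Model already loaded")
        return

    # A different checkpoint was requested: drop the stale objects first.
    for key in ("model", "modelCS", "modelFS", "loaded_model"):
        if key in st.session_state:
            del st.session_state[key]

    st.session_state["model"] = load_sd_model(custom_model)
    st.session_state["loaded_model"] = custom_model
    print("Model loaded.")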
@@ -792,7 +794,7 @@ def generation_callback(img, i=0):
     except TypeError:
         pass

-    if i % int(st.session_state['defaults'].general.update_preview_frequency) == 0 and st.session_state['defaults'].general.update_preview:
+    if i % int(st.session_state.update_preview_frequency) == 0 and st.session_state.update_preview:
         #print (img)
         #print (type(img))
         # The following lines will convert the tensor we got on img to an actual image we can render on the UI.
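generation_callback now reads the preview settings from flat session-state keys (st.session_state.update_preview and st.session_state.update_preview_frequency) instead of the defaults object, so whatever the UI last stored is what gates preview updates. A rough sketch of just the gating logic, with the tensor-to-image conversion mentioned in the comment above omitted:

import streamlit as st

def generation_callback(img, i=0):
    # Called by the sampler with the current latents every few steps.
    if not st.session_state.update_preview:
        return
    if i % int(st.session_state.update_preview_frequency) != 0:
        return
    # Here the real code converts the tensor `img` to an image and renders it in the UI.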
@@ -94,7 +94,7 @@ def txt2img(prompt: str, ddim_steps: int, sampler_name: str, realesrgan_model_na
     def sample(init_data, x, conditioning, unconditional_conditioning, sampler_name):
         samples_ddim, _ = sampler.sample(S=ddim_steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=cfg_scale,
                                          unconditional_conditioning=unconditional_conditioning, eta=ddim_eta, x_T=x, img_callback=generation_callback,
-                                         log_every_t=int(st.session_state['defaults'].general.update_preview_frequency))
+                                         log_every_t=int(st.session_state.update_preview_frequency))

         return samples_ddim

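Because log_every_t is now taken from the same st.session_state.update_preview_frequency value that generation_callback checks, the sampler calls the preview callback at the interval the callback itself expects, keeping the two settings in sync.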
@@ -114,8 +114,8 @@ def txt2img(prompt: str, ddim_steps: int, sampler_name: str, realesrgan_model_na
                 width=width,
                 height=height,
                 prompt_matrix=separate_prompts,
-                use_GFPGAN=use_GFPGAN,
-                use_RealESRGAN=use_RealESRGAN,
+                use_GFPGAN=st.session_state["use_GFPGAN"],
+                use_RealESRGAN=st.session_state["use_RealESRGAN"],
                 realesrgan_model_name=realesrgan_model_name,
                 fp=fp,
                 ddim_eta=ddim_eta,
@@ -210,9 +210,9 @@ def layout():
         with col3:
             # If we have custom models available on the "models/custom"
             #folder then we show a menu to select which model we want to use, otherwise we use the main model for SD
-            #if CustomModel_available:
-            custom_model = st.selectbox("Custom Model:", st.session_state['defaults'].txt2vid.custom_models_list,
-                                        index=st.session_state['defaults'].txt2vid.custom_models_list.index(st.session_state['defaults'].txt2vid.default_model),
+            if st.session_state["CustomModel_available"]:
+                st.session_state["custom_model"] = st.selectbox("Custom Model:", st.session_state["custom_models"],
+                                                                index=st.session_state["custom_models"].index(st.session_state['defaults'].general.default_model),
                                         help="Select the model you want to use. This option is only available if you have custom models \
                                         on your 'models/custom' folder. The model name that will be shown here is the same as the name\
                                         the file for the model has on said folder, it is recommended to give the .ckpt file a name that \
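The selectbox now writes the chosen checkpoint name into st.session_state["custom_model"], which is the value load_models() later compares against st.session_state["loaded_model"]. A hedged sketch of the wiring, using a hard-coded list in place of the model names the real app scans from the models/custom folder:

import streamlit as st

# Hypothetical stand-ins for values the real app derives at startup.
if "custom_models" not in st.session_state:
    st.session_state["custom_models"] = ["Stable Diffusion v1.4", "my-finetune"]
    st.session_state["CustomModel_available"] = True

if st.session_state["CustomModel_available"]:
    st.session_state["custom_model"] = st.selectbox(
        "Custom Model:",
        st.session_state["custom_models"],
        index=0,
        help="Models found in the models/custom folder.",
    )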
@@ -242,17 +242,20 @@ def layout():
             write_info_files = st.checkbox("Write Info file", value=True, help="Save a file next to the image with informartion about the generation.")
             save_as_jpg = st.checkbox("Save samples as jpg", value=False, help="Saves the images as jpg instead of png.")

-            if GFPGAN_available:
-                use_GFPGAN = st.checkbox("Use GFPGAN", value=st.session_state['defaults'].txt2img.use_GFPGAN, help="Uses the GFPGAN model to improve faces after the generation. This greatly improve the quality and consistency of faces but uses extra VRAM. Disable if you need the extra VRAM.")
+            if st.session_state["GFPGAN_available"]:
+                st.session_state["use_GFPGAN"] = st.checkbox("Use GFPGAN", value=st.session_state['defaults'].txt2img.use_GFPGAN, help="Uses the GFPGAN model to improve faces after the generation.\
+                    This greatly improve the quality and consistency of faces but uses extra VRAM. Disable if you need the extra VRAM.")
             else:
-                use_GFPGAN = False
+                st.session_state["use_GFPGAN"] = False

-            if RealESRGAN_available:
-                use_RealESRGAN = st.checkbox("Use RealESRGAN", value=st.session_state['defaults'].txt2img.use_RealESRGAN, help="Uses the RealESRGAN model to upscale the images after the generation. This greatly improve the quality and lets you have high resolution images but uses extra VRAM. Disable if you need the extra VRAM.")
-                RealESRGAN_model = st.selectbox("RealESRGAN model", ["RealESRGAN_x4plus", "RealESRGAN_x4plus_anime_6B"], index=0)
+            if st.session_state["RealESRGAN_available"]:
+                st.session_state["use_RealESRGAN"] = st.checkbox("Use RealESRGAN", value=st.session_state['defaults'].txt2img.use_RealESRGAN,
+                                                                 help="Uses the RealESRGAN model to upscale the images after the generation.\
+                    This greatly improve the quality and lets you have high resolution images but uses extra VRAM. Disable if you need the extra VRAM.")
+                st.session_state["RealESRGAN_model"] = st.selectbox("RealESRGAN model", ["RealESRGAN_x4plus", "RealESRGAN_x4plus_anime_6B"], index=0)
             else:
-                use_RealESRGAN = False
-                RealESRGAN_model = "RealESRGAN_x4plus"
+                st.session_state["use_RealESRGAN"] = False
+                st.session_state["RealESRGAN_model"] = "RealESRGAN_x4plus"

             variant_amount = st.slider("Variant Amount:", value=st.session_state['defaults'].txt2img.variant_amount, min_value=0.0, max_value=1.0, step=0.01)
             variant_seed = st.text_input("Variant Seed:", value=st.session_state['defaults'].txt2img.seed, help="The seed to use when generating a variant, if left blank a random seed will be generated.")
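The commit stores widget values in st.session_state by explicit assignment. Purely as an aside, Streamlit's key= argument is an alternative way to get the same session-state entry without the assignment; the commit itself does not use it:

import streamlit as st

# Alternative wiring (not what the commit does): the widget writes its value
# directly to st.session_state["use_GFPGAN"] via its key.
st.checkbox("Use GFPGAN", value=False, key="use_GFPGAN")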
@@ -261,13 +264,14 @@ def layout():
         if generate_button:
             #print("Loading models")
             # load the models when we hit the generate button for the first time, it wont be loaded after that so dont worry.
-            load_models(False, use_GFPGAN, use_RealESRGAN, RealESRGAN_model)
+            load_models(False, st.session_state["use_GFPGAN"], st.session_state["use_RealESRGAN"], st.session_state["RealESRGAN_model"], st.session_state["CustomModel_available"],
+                        st.session_state["custom_model"])

             try:
-                output_images, seeds, info, stats = txt2img(prompt, st.session_state.sampling_steps, sampler_name, RealESRGAN_model, batch_count, batch_size,
+                output_images, seeds, info, stats = txt2img(prompt, st.session_state.sampling_steps, sampler_name, st.session_state["RealESRGAN_model"], batch_count, batch_size,
                                                             cfg_scale, seed, height, width, separate_prompts, normalize_prompt_weights, save_individual_images,
-                                                            save_grid, group_by_prompt, save_as_jpg, use_GFPGAN, use_RealESRGAN, RealESRGAN_model, fp=st.session_state.defaults.general.fp,
-                                                            variant_amount=variant_amount, variant_seed=variant_seed, write_info_files=write_info_files)
+                                                            save_grid, group_by_prompt, save_as_jpg, st.session_state["use_GFPGAN"], st.session_state["use_RealESRGAN"], st.session_state["RealESRGAN_model"],
+                                                            fp=st.session_state.defaults.general.fp, variant_amount=variant_amount, variant_seed=variant_seed, write_info_files=write_info_files)

                 message.success('Render Complete: ' + info + '; Stats: ' + stats, icon="✅")

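Net effect at generation time: pressing Generate calls load_models() with the flags and checkpoint name read from st.session_state, so the weights are reloaded only when st.session_state["loaded_model"] no longer matches the selected custom model, and txt2img() receives the same session-state values the widgets set.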