Mirror of https://github.com/sd-webui/stable-diffusion-webui.git (synced 2024-12-14 14:52:31 +03:00)

Merge pull request #1124 from ZeroCool940711/dev

Modified the way the default settings are loaded from the config file.

This commit is contained in commit eb85dc4d63.
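The change replaces the module-level defaults object that pages previously imported from webui_streamlit with a per-session copy kept in st.session_state["defaults"]. A minimal sketch of the loading pattern this enables is below; the YAML path and helper name are assumptions for illustration, not code from this commit (OmegaConf itself is already used elsewhere in this diff):

    import streamlit as st
    from omegaconf import OmegaConf

    def load_defaults(config_path="configs/webui/webui_streamlit.yaml"):  # hypothetical path
        # Load the config file once per browser session; every page and rerun
        # then reads (and a Settings page could overwrite) the same object.
        if "defaults" not in st.session_state:
            st.session_state["defaults"] = OmegaConf.load(config_path)
        return st.session_state["defaults"]

Because st.session_state survives Streamlit reruns, settings edited at runtime take effect immediately without re-importing any module.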
@@ -1,5 +1,4 @@
 import streamlit as st
-from webui_streamlit import defaults
+from webui_streamlit import st

 # The global settings section will be moved to the Settings page.
 #with st.expander("Global Settings:"):
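Every file below repeats the same mechanical rewrite, so the hunks are easiest to read against this hedged before/after sketch (attribute names are taken from the diff; OmegaConf objects allow the same dotted access either way):

    import streamlit as st

    # Before: a module-level object, shared by every session and fixed at import time.
    # from webui_streamlit import defaults
    # steps = defaults.img2img.sampling_steps

    # After: a per-session copy that the UI can modify at runtime.
    steps = st.session_state["defaults"].img2img.sampling_steps

Note that the diff mixes st.session_state['defaults'] and st.session_state["defaults"]; the two spellings are equivalent in Python.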
@@ -1,4 +1,4 @@
-from webui_streamlit import st
+from webui_streamlit import st, defaults
 from sd_utils import *
 import os
 from PIL import Image
@@ -43,7 +43,7 @@ def img2img(prompt: str = '', init_info: any = None, init_info_mask: any = None,
 random_seed_loopback: bool = False
 ):

-outpath = defaults.general.outdir_img2img or defaults.general.outdir or "outputs/img2img-samples"
+outpath = st.session_state['defaults'].general.outdir_img2img or st.session_state['defaults'].general.outdir or "outputs/img2img-samples"
 #err = False
 #loopback = False
 #skip_save = False
@@ -162,14 +162,14 @@ def img2img(prompt: str = '', init_info: any = None, init_info_mask: any = None,
 mask = mask[None].transpose(0, 1, 2, 3)
 mask = torch.from_numpy(mask).to(st.session_state["device"])

-if defaults.general.optimized:
+if st.session_state['defaults'].general.optimized:
 modelFS.to(st.session_state["device"] )

 init_image = 2. * image - 1.
 init_image = init_image.to(st.session_state["device"])
-init_latent = (st.session_state["model"] if not defaults.general.optimized else modelFS).get_first_stage_encoding((st.session_state["model"] if not defaults.general.optimized else modelFS).encode_first_stage(init_image)) # move to latent space
+init_latent = (st.session_state["model"] if not st.session_state['defaults'].general.optimized else modelFS).get_first_stage_encoding((st.session_state["model"] if not st.session_state['defaults'].general.optimized else modelFS).encode_first_stage(init_image)) # move to latent space

-if defaults.general.optimized:
+if st.session_state['defaults'].general.optimized:
 mem = torch.cuda.memory_allocated()/1e6
 modelFS.to("cpu")
 while(torch.cuda.memory_allocated()/1e6 >= mem):
@@ -376,7 +376,7 @@ def layout():
 #folder then we show a menu to select which model we want to use, otherwise we use the main model for SD
 if st.session_state["CustomModel_available"]:
 st.session_state["custom_model"] = st.selectbox("Custom Model:", st.session_state["custom_models"],
-index=st.session_state["custom_models"].index(defaults.general.default_model),
+index=st.session_state["custom_models"].index(st.session_state['defaults'].general.default_model),
 help="Select the model you want to use. This option is only available if you have custom models \
 on your 'models/custom' folder. The model name that will be shown here is the same as the name\
 the file for the model has on said folder, it is recommended to give the .ckpt file a name that \
@@ -385,11 +385,11 @@ def layout():
 st.session_state["custom_model"] = "Stable Diffusion v1.4"


-st.session_state["sampling_steps"] = st.slider("Sampling Steps", value=defaults.img2img.sampling_steps, min_value=1, max_value=500)
+st.session_state["sampling_steps"] = st.slider("Sampling Steps", value=st.session_state['defaults'].img2img.sampling_steps, min_value=1, max_value=500)

 sampler_name_list = ["k_lms", "k_euler", "k_euler_a", "k_dpm_2", "k_dpm_2_a", "k_heun", "PLMS", "DDIM"]
 st.session_state["sampler_name"] = st.selectbox("Sampling method",sampler_name_list,
-index=sampler_name_list.index(defaults.img2img.sampler_name), help="Sampling method to use.")
+index=sampler_name_list.index(st.session_state['defaults'].img2img.sampler_name), help="Sampling method to use.")

 mask_mode_list = ["Mask", "Inverted mask", "Image alpha"]
 mask_mode = st.selectbox("Mask Mode", mask_mode_list,
@@ -398,9 +398,9 @@ def layout():
 )
 mask_mode = mask_mode_list.index(mask_mode)

-width = st.slider("Width:", min_value=64, max_value=1024, value=defaults.img2img.width, step=64)
-height = st.slider("Height:", min_value=64, max_value=1024, value=defaults.img2img.height, step=64)
-seed = st.text_input("Seed:", value=defaults.img2img.seed, help=" The seed to use, if left blank a random seed will be generated.")
+width = st.slider("Width:", min_value=64, max_value=1024, value=st.session_state['defaults'].img2img.width, step=64)
+height = st.slider("Height:", min_value=64, max_value=1024, value=st.session_state['defaults'].img2img.height, step=64)
+seed = st.text_input("Seed:", value=st.session_state['defaults'].img2img.seed, help=" The seed to use, if left blank a random seed will be generated.")
 noise_mode_list = ["Seed", "Find Noise", "Matched Noise", "Find+Matched Noise"]
 noise_mode = st.selectbox(
 "Noise Mode", noise_mode_list,
@@ -408,38 +408,38 @@ def layout():
 )
 noise_mode = noise_mode_list.index(noise_mode)
 find_noise_steps = st.slider("Find Noise Steps", value=100, min_value=1, max_value=500)
-batch_count = st.slider("Batch count.", min_value=1, max_value=100, value=defaults.img2img.batch_count, step=1,
+batch_count = st.slider("Batch count.", min_value=1, max_value=100, value=st.session_state['defaults'].img2img.batch_count, step=1,
 help="How many iterations or batches of images to generate in total.")

 #
 with st.expander("Advanced"):
-separate_prompts = st.checkbox("Create Prompt Matrix.", value=defaults.img2img.separate_prompts,
+separate_prompts = st.checkbox("Create Prompt Matrix.", value=st.session_state['defaults'].img2img.separate_prompts,
 help="Separate multiple prompts using the `|` character, and get all combinations of them.")
-normalize_prompt_weights = st.checkbox("Normalize Prompt Weights.", value=defaults.img2img.normalize_prompt_weights,
+normalize_prompt_weights = st.checkbox("Normalize Prompt Weights.", value=st.session_state['defaults'].img2img.normalize_prompt_weights,
 help="Ensure the sum of all weights add up to 1.0")
-loopback = st.checkbox("Loopback.", value=defaults.img2img.loopback, help="Use images from previous batch when creating next batch.")
-random_seed_loopback = st.checkbox("Random loopback seed.", value=defaults.img2img.random_seed_loopback, help="Random loopback seed")
+loopback = st.checkbox("Loopback.", value=st.session_state['defaults'].img2img.loopback, help="Use images from previous batch when creating next batch.")
+random_seed_loopback = st.checkbox("Random loopback seed.", value=st.session_state['defaults'].img2img.random_seed_loopback, help="Random loopback seed")
 img2img_mask_restore = st.checkbox("Only modify regenerated parts of image",
-value=defaults.img2img.mask_restore,
+value=st.session_state['defaults'].img2img.mask_restore,
 help="Enable to restore the unmasked parts of the image with the input, may not blend as well but preserves detail")
-save_individual_images = st.checkbox("Save individual images.", value=defaults.img2img.save_individual_images,
+save_individual_images = st.checkbox("Save individual images.", value=st.session_state['defaults'].img2img.save_individual_images,
 help="Save each image generated before any filter or enhancement is applied.")
-save_grid = st.checkbox("Save grid",value=defaults.img2img.save_grid, help="Save a grid with all the images generated into a single image.")
-group_by_prompt = st.checkbox("Group results by prompt", value=defaults.img2img.group_by_prompt,
+save_grid = st.checkbox("Save grid",value=st.session_state['defaults'].img2img.save_grid, help="Save a grid with all the images generated into a single image.")
+group_by_prompt = st.checkbox("Group results by prompt", value=st.session_state['defaults'].img2img.group_by_prompt,
 help="Saves all the images with the same prompt into the same folder. \
 When using a prompt matrix each prompt combination will have its own folder.")
-write_info_files = st.checkbox("Write Info file", value=defaults.img2img.write_info_files,
+write_info_files = st.checkbox("Write Info file", value=st.session_state['defaults'].img2img.write_info_files,
 help="Save a file next to the image with informartion about the generation.")
-save_as_jpg = st.checkbox("Save samples as jpg", value=defaults.img2img.save_as_jpg, help="Saves the images as jpg instead of png.")
+save_as_jpg = st.checkbox("Save samples as jpg", value=st.session_state['defaults'].img2img.save_as_jpg, help="Saves the images as jpg instead of png.")

 if st.session_state["GFPGAN_available"]:
-use_GFPGAN = st.checkbox("Use GFPGAN", value=defaults.img2img.use_GFPGAN, help="Uses the GFPGAN model to improve faces after the generation.\
+use_GFPGAN = st.checkbox("Use GFPGAN", value=st.session_state['defaults'].img2img.use_GFPGAN, help="Uses the GFPGAN model to improve faces after the generation.\
 This greatly improve the quality and consistency of faces but uses extra VRAM. Disable if you need the extra VRAM.")
 else:
 use_GFPGAN = False

 if st.session_state["RealESRGAN_available"]:
-st.session_state["use_RealESRGAN"] = st.checkbox("Use RealESRGAN", value=defaults.img2img.use_RealESRGAN,
+st.session_state["use_RealESRGAN"] = st.checkbox("Use RealESRGAN", value=st.session_state['defaults'].img2img.use_RealESRGAN,
 help="Uses the RealESRGAN model to upscale the images after the generation.\
 This greatly improve the quality and lets you have high resolution images but uses extra VRAM. Disable if you need the extra VRAM.")
 st.session_state["RealESRGAN_model"] = st.selectbox("RealESRGAN model", ["RealESRGAN_x4plus", "RealESRGAN_x4plus_anime_6B"], index=0)
@@ -447,27 +447,27 @@ def layout():
 st.session_state["use_RealESRGAN"] = False
 st.session_state["RealESRGAN_model"] = "RealESRGAN_x4plus"

-variant_amount = st.slider("Variant Amount:", value=defaults.img2img.variant_amount, min_value=0.0, max_value=1.0, step=0.01)
-variant_seed = st.text_input("Variant Seed:", value=defaults.img2img.variant_seed,
+variant_amount = st.slider("Variant Amount:", value=st.session_state['defaults'].img2img.variant_amount, min_value=0.0, max_value=1.0, step=0.01)
+variant_seed = st.text_input("Variant Seed:", value=st.session_state['defaults'].img2img.variant_seed,
 help="The seed to use when generating a variant, if left blank a random seed will be generated.")
-cfg_scale = st.slider("CFG (Classifier Free Guidance Scale):", min_value=1.0, max_value=30.0, value=defaults.img2img.cfg_scale, step=0.5,
+cfg_scale = st.slider("CFG (Classifier Free Guidance Scale):", min_value=1.0, max_value=30.0, value=st.session_state['defaults'].img2img.cfg_scale, step=0.5,
 help="How strongly the image should follow the prompt.")
-batch_size = st.slider("Batch size", min_value=1, max_value=100, value=defaults.img2img.batch_size, step=1,
+batch_size = st.slider("Batch size", min_value=1, max_value=100, value=st.session_state['defaults'].img2img.batch_size, step=1,
 help="How many images are at once in a batch.\
 It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it takes to finish \
 generation as more images are generated at once.\
 Default: 1")

-st.session_state["denoising_strength"] = st.slider("Denoising Strength:", value=defaults.img2img.denoising_strength,
+st.session_state["denoising_strength"] = st.slider("Denoising Strength:", value=st.session_state['defaults'].img2img.denoising_strength,
 min_value=0.01, max_value=1.0, step=0.01)

 with st.expander("Preview Settings"):
-st.session_state["update_preview"] = st.checkbox("Update Image Preview", value=defaults.img2img.update_preview,
+st.session_state["update_preview"] = st.checkbox("Update Image Preview", value=st.session_state['defaults'].img2img.update_preview,
 help="If enabled the image preview will be updated during the generation instead of at the end. \
 You can use the Update Preview \Frequency option bellow to customize how frequent it's updated. \
 By default this is enabled and the frequency is set to 1 step.")

-st.session_state["update_preview_frequency"] = st.text_input("Update Image Preview Frequency", value=defaults.img2img.update_preview_frequency,
+st.session_state["update_preview_frequency"] = st.text_input("Update Image Preview Frequency", value=st.session_state['defaults'].img2img.update_preview_frequency,
 help="Frequency in steps at which the the preview image is updated. By default the frequency \
 is set to 1 step.")

@@ -569,7 +569,7 @@ def layout():
 sampler_name=st.session_state["sampler_name"], n_iter=batch_count,
 cfg_scale=cfg_scale, denoising_strength=st.session_state["denoising_strength"], variant_seed=variant_seed,
 seed=seed, noise_mode=noise_mode, find_noise_steps=find_noise_steps, width=width,
-height=height, fp=defaults.general.fp, variant_amount=variant_amount,
+height=height, fp=st.session_state['defaults'].general.fp, variant_amount=variant_amount,
 ddim_eta=0.0, write_info_files=write_info_files, RealESRGAN_model=st.session_state["RealESRGAN_model"],
 separate_prompts=separate_prompts, normalize_prompt_weights=normalize_prompt_weights,
 save_individual_images=save_individual_images, save_grid=save_grid,
@@ -1,5 +1,5 @@
 # base webui import and utils.
-from webui_streamlit import st, defaults
+from webui_streamlit import st
 from sd_utils import *

 #home plugin
@@ -1,5 +1,5 @@
 # base webui import and utils.
-from webui_streamlit import st, defaults
+from webui_streamlit import st


 # streamlit imports
@@ -66,7 +66,7 @@ opt_C = 4
 opt_f = 8

 # should and will be moved to a settings menu in the UI at some point
-grid_format = [s.lower() for s in defaults.general.grid_format.split(':')]
+grid_format = [s.lower() for s in st.session_state["defaults"].general.grid_format.split(':')]
 grid_lossless = False
 grid_quality = 100
 if grid_format[0] == 'png':
@@ -85,7 +85,7 @@ elif grid_format[0] == 'webp':
 grid_quality = abs(grid_quality)

 # should and will be moved to a settings menu in the UI at some point
-save_format = [s.lower() for s in defaults.general.save_format.split(':')]
+save_format = [s.lower() for s in st.session_state["defaults"].general.save_format.split(':')]
 save_lossless = False
 save_quality = 100
 if save_format[0] == 'png':
@@ -105,7 +105,7 @@ elif save_format[0] == 'webp':

 # this should force GFPGAN and RealESRGAN onto the selected gpu as well
 os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
-os.environ["CUDA_VISIBLE_DEVICES"] = str(defaults.general.gpu)
+os.environ["CUDA_VISIBLE_DEVICES"] = str(st.session_state["defaults"].general.gpu)

 @retry(tries=5)
 def load_models(continue_prev_run = False, use_GFPGAN=False, use_RealESRGAN=False, RealESRGAN_model="RealESRGAN_x4plus",
@@ -130,7 +130,7 @@ def load_models(continue_prev_run = False, use_GFPGAN=False, use_RealESRGAN=Fals
 print("GFPGAN already loaded")
 else:
 # Load GFPGAN
-if os.path.exists(defaults.general.GFPGAN_dir):
+if os.path.exists(st.session_state["defaults"].general.GFPGAN_dir):
 try:
 st.session_state["GFPGAN"] = load_GFPGAN()
 print("Loaded GFPGAN")
@@ -154,7 +154,7 @@ def load_models(continue_prev_run = False, use_GFPGAN=False, use_RealESRGAN=Fals
 except KeyError:
 pass

-if os.path.exists(defaults.general.RealESRGAN_dir):
+if os.path.exists(st.session_state["defaults"].general.RealESRGAN_dir):
 # st.session_state is used for keeping the models in memory across multiple pages or runs.
 st.session_state["RealESRGAN"] = load_RealESRGAN(RealESRGAN_model)
 print("Loaded RealESRGAN with model "+ st.session_state["RealESRGAN"].model.name)
@@ -174,27 +174,27 @@ def load_models(continue_prev_run = False, use_GFPGAN=False, use_RealESRGAN=Fals
 except KeyError:
 pass

-config = OmegaConf.load(defaults.general.default_model_config)
+config = OmegaConf.load(st.session_state["defaults"].general.default_model_config)

-if custom_model == defaults.general.default_model:
-model = load_model_from_config(config, defaults.general.default_model_path)
+if custom_model == st.session_state["defaults"].general.default_model:
+model = load_model_from_config(config, st.session_state["defaults"].general.default_model_path)
 else:
 model = load_model_from_config(config, os.path.join("models","custom", f"{custom_model}.ckpt"))

 st.session_state["custom_model"] = custom_model
 st.session_state["device"] = torch.device(f"cuda:{defaults.general.gpu}") if torch.cuda.is_available() else torch.device("cpu")
-st.session_state["model"] = (model if defaults.general.no_half else model.half()).to(st.session_state["device"] )
+st.session_state["model"] = (model if st.session_state["defaults"].general.no_half else model.half()).to(st.session_state["device"] )
 else:
-config = OmegaConf.load(defaults.general.default_model_config)
+config = OmegaConf.load(st.session_state["defaults"].general.default_model_config)

-if custom_model == defaults.general.default_model:
-model = load_model_from_config(config, defaults.general.default_model_path)
+if custom_model == st.session_state["defaults"].general.default_model:
+model = load_model_from_config(config, st.session_state["defaults"].general.default_model_path)
 else:
 model = load_model_from_config(config, os.path.join("models","custom", f"{custom_model}.ckpt"))

 st.session_state["custom_model"] = custom_model
-st.session_state["device"] = torch.device(f"cuda:{defaults.general.gpu}") if torch.cuda.is_available() else torch.device("cpu")
-st.session_state["model"] = (model if defaults.general.no_half else model.half()).to(st.session_state["device"] )
+st.session_state["device"] = torch.device(f"cuda:{st.session_state['defaults'].general.gpu}") if torch.cuda.is_available() else torch.device("cpu")
+st.session_state["model"] = (model if st.session_state['defaults'].general.no_half else model.half()).to(st.session_state["device"] )

 print("Model loaded.")

@@ -245,7 +245,7 @@ class MemUsageMonitor(threading.Thread):
 print(f"[{self.name}] Unable to initialize NVIDIA management. No memory stats. \n")
 return
 print(f"[{self.name}] Recording max memory usage...\n")
-handle = pynvml.nvmlDeviceGetHandleByIndex(defaults.general.gpu)
+handle = pynvml.nvmlDeviceGetHandleByIndex(st.session_state['defaults'].general.gpu)
 self.total = pynvml.nvmlDeviceGetMemoryInfo(handle).total
 while not self.stop_flag:
 m = pynvml.nvmlDeviceGetMemoryInfo(handle)
@@ -592,7 +592,7 @@ def create_random_tensors(shape, seeds):
 # the way I see it, it's better to do this on CPU, so that everyone gets same result;
 # but the original script had it like this so i do not dare change it for now because
 # it will break everyone's seeds.
-xs.append(torch.randn(shape, device=defaults.general.gpu))
+xs.append(torch.randn(shape, device=st.session_state['defaults'].general.gpu))
 x = torch.stack(xs)
 return x

@@ -602,19 +602,19 @@ def torch_gc():

 def load_GFPGAN():
 model_name = 'GFPGANv1.3'
-model_path = os.path.join(defaults.general.GFPGAN_dir, 'experiments/pretrained_models', model_name + '.pth')
+model_path = os.path.join(st.session_state['defaults'].general.GFPGAN_dir, 'experiments/pretrained_models', model_name + '.pth')
 if not os.path.isfile(model_path):
 raise Exception("GFPGAN model not found at path "+model_path)

-sys.path.append(os.path.abspath(defaults.general.GFPGAN_dir))
+sys.path.append(os.path.abspath(st.session_state['defaults'].general.GFPGAN_dir))
 from gfpgan import GFPGANer

-if defaults.general.gfpgan_cpu or defaults.general.extra_models_cpu:
+if st.session_state['defaults'].general.gfpgan_cpu or st.session_state['defaults'].general.extra_models_cpu:
 instance = GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=torch.device('cpu'))
-elif defaults.general.extra_models_gpu:
-instance = GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=torch.device(f'cuda:{defaults.general.gfpgan_gpu}'))
+elif st.session_state['defaults'].general.extra_models_gpu:
+instance = GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=torch.device(f"cuda:{st.session_state['defaults'].general.gfpgan_gpu}"))
 else:
-instance = GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=torch.device(f'cuda:{defaults.general.gpu}'))
+instance = GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=torch.device(f"cuda:{st.session_state['defaults'].general.gpu}"))
 return instance

 def load_RealESRGAN(model_name: str):
@@ -624,21 +624,21 @@ def load_RealESRGAN(model_name: str):
 'RealESRGAN_x4plus_anime_6B': RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
 }

-model_path = os.path.join(defaults.general.RealESRGAN_dir, 'experiments/pretrained_models', model_name + '.pth')
-if not os.path.exists(os.path.join(defaults.general.RealESRGAN_dir, "experiments","pretrained_models", f"{model_name}.pth")):
+model_path = os.path.join(st.session_state['defaults'].general.RealESRGAN_dir, 'experiments/pretrained_models', model_name + '.pth')
+if not os.path.exists(os.path.join(st.session_state['defaults'].general.RealESRGAN_dir, "experiments","pretrained_models", f"{model_name}.pth")):
 raise Exception(model_name+".pth not found at path "+model_path)

-sys.path.append(os.path.abspath(defaults.general.RealESRGAN_dir))
+sys.path.append(os.path.abspath(st.session_state['defaults'].general.RealESRGAN_dir))
 from realesrgan import RealESRGANer

-if defaults.general.esrgan_cpu or defaults.general.extra_models_cpu:
+if st.session_state['defaults'].general.esrgan_cpu or st.session_state['defaults'].general.extra_models_cpu:
 instance = RealESRGANer(scale=2, model_path=model_path, model=RealESRGAN_models[model_name], pre_pad=0, half=False) # cpu does not support half
 instance.device = torch.device('cpu')
 instance.model.to('cpu')
-elif defaults.general.extra_models_gpu:
-instance = RealESRGANer(scale=2, model_path=model_path, model=RealESRGAN_models[model_name], pre_pad=0, half=not defaults.general.no_half, device=torch.device(f'cuda:{defaults.general.esrgan_gpu}'))
+elif st.session_state['defaults'].general.extra_models_gpu:
+instance = RealESRGANer(scale=2, model_path=model_path, model=RealESRGAN_models[model_name], pre_pad=0, half=not st.session_state['defaults'].general.no_half, device=torch.device(f"cuda:{st.session_state['defaults'].general.esrgan_gpu}"))
 else:
-instance = RealESRGANer(scale=2, model_path=model_path, model=RealESRGAN_models[model_name], pre_pad=0, half=not defaults.general.no_half, device=torch.device(f'cuda:{defaults.general.gpu}'))
+instance = RealESRGANer(scale=2, model_path=model_path, model=RealESRGAN_models[model_name], pre_pad=0, half=not st.session_state['defaults'].general.no_half, device=torch.device(f"cuda:{st.session_state['defaults'].general.gpu}"))
 instance.model.name = model_name

 return instance
@@ -647,8 +647,8 @@ def load_RealESRGAN(model_name: str):
 def load_LDSR(checking=False):
 model_name = 'model'
 yaml_name = 'project'
-model_path = os.path.join(defaults.general.LDSR_dir, 'experiments/pretrained_models', model_name + '.ckpt')
-yaml_path = os.path.join(defaults.general.LDSR_dir, 'experiments/pretrained_models', yaml_name + '.yaml')
+model_path = os.path.join(st.session_state['defaults'].general.LDSR_dir, 'experiments/pretrained_models', model_name + '.ckpt')
+yaml_path = os.path.join(st.session_state['defaults'].general.LDSR_dir, 'experiments/pretrained_models', yaml_name + '.yaml')
 if not os.path.isfile(model_path):
 raise Exception("LDSR model not found at path "+model_path)
 if not os.path.isfile(yaml_path):
@@ -656,7 +656,7 @@ def load_LDSR(checking=False):
 if checking == True:
 return True

-sys.path.append(os.path.abspath(defaults.general.LDSR_dir))
+sys.path.append(os.path.abspath(st.session_state['defaults'].general.LDSR_dir))
 from LDSR import LDSR
 LDSRObject = LDSR(model_path, yaml_path)
 return LDSRObject
@@ -665,7 +665,7 @@ def load_LDSR(checking=False):
 LDSR = None
 def try_loading_LDSR(model_name: str,checking=False):
 global LDSR
-if os.path.exists(defaults.general.LDSR_dir):
+if os.path.exists(st.session_state['defaults'].general.LDSR_dir):
 try:
 LDSR = load_LDSR(checking=True) # TODO: Should try to load both models before giving up
 if checking == True:
@@ -682,8 +682,8 @@ def try_loading_LDSR(model_name: str,checking=False):
 #try_loading_LDSR('model',checking=True)

 def load_SD_model():
-if defaults.general.optimized:
-sd = load_sd_from_config(defaults.general.default_model_path)
+if st.session_state['defaults'].general.optimized:
+sd = load_sd_from_config(st.session_state['defaults'].general.default_model_path)
 li, lo = [], []
 for key, v_ in sd.items():
 sp = key.split('.')
@@ -708,7 +708,7 @@ def load_SD_model():
 _, _ = model.load_state_dict(sd, strict=False)
 model.cuda()
 model.eval()
-model.turbo = defaults.general.optimized_turbo
+model.turbo = st.session_state['defaults'].general.optimized_turbo

 modelCS = instantiate_from_config(config.modelCondStage)
 _, _ = modelCS.load_state_dict(sd, strict=False)
@@ -721,17 +721,17 @@ def load_SD_model():

 del sd

-if not defaults.general.no_half:
+if not st.session_state['defaults'].general.no_half:
 model = model.half()
 modelCS = modelCS.half()
 modelFS = modelFS.half()
 return model,modelCS,modelFS,device, config
 else:
-config = OmegaConf.load(defaults.general.default_model_config)
-model = load_model_from_config(config, defaults.general.default_model_path)
+config = OmegaConf.load(st.session_state['defaults'].general.default_model_config)
+model = load_model_from_config(config, st.session_state['defaults'].general.default_model_path)

 device = torch.device(f"cuda:{opt.gpu}") if torch.cuda.is_available() else torch.device("cpu")
-model = (model if defaults.general.no_half else model.half()).to(device)
+model = (model if st.session_state['defaults'].general.no_half else model.half()).to(device)
 return model, device,config

 #
@@ -746,7 +746,7 @@ def ModelLoader(models,load=False,unload=False,imgproc_realesrgan_model_name='Re
 if m in global_vars:
 #if it is, delete it
 del global_vars[m]
-if defaults.general.optimized:
+if st.session_state['defaults'].general.optimized:
 if m == 'model':
 del global_vars[m+'FS']
 del global_vars[m+'CS']
@@ -762,7 +762,7 @@ def ModelLoader(models,load=False,unload=False,imgproc_realesrgan_model_name='Re
 elif m == 'model':
 sdLoader = load_sd_from_config()
 global_vars[m] = sdLoader[0]
-if defaults.general.optimized:
+if st.session_state['defaults'].general.optimized:
 global_vars[m+'CS'] = sdLoader[1]
 global_vars[m+'FS'] = sdLoader[2]
 elif m == 'RealESRGAN':
@@ -785,18 +785,18 @@ def generation_callback(img, i=0):
 except TypeError:
 pass

-if i % int(defaults.general.update_preview_frequency) == 0 and defaults.general.update_preview:
+if i % int(st.session_state['defaults'].general.update_preview_frequency) == 0 and st.session_state['defaults'].general.update_preview:
 #print (img)
 #print (type(img))
 # The following lines will convert the tensor we got on img to an actual image we can render on the UI.
 # It can probably be done in a better way for someone who knows what they're doing. I don't.
 #print (img,isinstance(img, torch.Tensor))
 if isinstance(img, torch.Tensor):
-x_samples_ddim = (st.session_state["model"] if not defaults.general.optimized else modelFS).decode_first_stage(img)
+x_samples_ddim = (st.session_state["model"] if not st.session_state['defaults'].general.optimized else modelFS).decode_first_stage(img)
 else:
 # When using the k Diffusion samplers they return a dict instead of a tensor that look like this:
 # {'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised}
-x_samples_ddim = (st.session_state["model"] if not defaults.general.optimized else modelFS).decode_first_stage(img["denoised"])
+x_samples_ddim = (st.session_state["model"] if not st.session_state['defaults'].general.optimized else modelFS).decode_first_stage(img["denoised"])

 x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)

@@ -912,9 +912,9 @@ def image_grid(imgs, batch_size, force_n_rows=None, captions=None):
 #print (len(imgs))
 if force_n_rows is not None:
 rows = force_n_rows
-elif defaults.general.n_rows > 0:
-rows = defaults.general.n_rows
-elif defaults.general.n_rows == 0:
+elif st.session_state['defaults'].general.n_rows > 0:
+rows = st.session_state['defaults'].general.n_rows
+elif st.session_state['defaults'].general.n_rows == 0:
 rows = batch_size
 else:
 rows = math.sqrt(len(imgs))
@@ -1025,10 +1025,10 @@ def draw_prompt_matrix(im, width, height, all_prompts):
 def check_prompt_length(prompt, comments):
 """this function tests if prompt is too long, and if so, adds a message to comments"""

-tokenizer = (st.session_state["model"] if not defaults.general.optimized else modelCS).cond_stage_model.tokenizer
-max_length = (st.session_state["model"] if not defaults.general.optimized else modelCS).cond_stage_model.max_length
+tokenizer = (st.session_state["model"] if not st.session_state['defaults'].general.optimized else modelCS).cond_stage_model.tokenizer
+max_length = (st.session_state["model"] if not st.session_state['defaults'].general.optimized else modelCS).cond_stage_model.max_length

-info = (st.session_state["model"] if not defaults.general.optimized else modelCS).cond_stage_model.tokenizer([prompt], truncation=True, max_length=max_length,
+info = (st.session_state["model"] if not st.session_state['defaults'].general.optimized else modelCS).cond_stage_model.tokenizer([prompt], truncation=True, max_length=max_length,
 return_overflowing_tokens=True, padding="max_length", return_tensors="pt")
 ovf = info['overflowing_tokens'][0]
 overflowing_count = ovf.shape[0]
@@ -1047,7 +1047,7 @@ def save_sample(image, sample_path_i, filename, jpg_sample, prompts, seeds, widt

 filename_i = os.path.join(sample_path_i, filename)

-if defaults.general.save_metadata or write_info_files:
+if st.session_state['defaults'].general.save_metadata or write_info_files:
 # toggles differ for txt2img vs. img2img:
 offset = 0 if init_img is None else 2
 toggles = []
@@ -1087,7 +1087,7 @@ def save_sample(image, sample_path_i, filename, jpg_sample, prompts, seeds, widt
 with open(f"{filename_i}.yaml", "w", encoding="utf8") as f:
 yaml.dump(metadata, f, allow_unicode=True, width=10000)

-if defaults.general.save_metadata:
+if st.session_state['defaults'].general.save_metadata:
 # metadata = {
 # "SD:prompt": prompts[i],
 # "SD:seed": str(seeds[i]),
@@ -1281,7 +1281,7 @@ def process_images(
 print(f"Prompt matrix will create {len(all_prompts)} images using a total of {n_iter} batches.")
 else:

-if not defaults.general.no_verify_input:
+if not st.session_state['defaults'].general.no_verify_input:
 try:
 check_prompt_length(prompt, comments)
 except:
@@ -1292,11 +1292,11 @@ def process_images(
 all_prompts = batch_size * n_iter * [prompt]
 all_seeds = [seed + x for x in range(len(all_prompts))]

-precision_scope = autocast if defaults.general.precision == "autocast" else nullcontext
+precision_scope = autocast if st.session_state['defaults'].general.precision == "autocast" else nullcontext
 output_images = []
 grid_captions = []
 stats = []
-with torch.no_grad(), precision_scope("cuda"), (st.session_state["model"].ema_scope() if not defaults.general.optimized else nullcontext()):
+with torch.no_grad(), precision_scope("cuda"), (st.session_state["model"].ema_scope() if not st.session_state['defaults'].general.optimized else nullcontext()):
 init_data = func_init()
 tic = time.time()

@@ -1321,10 +1321,10 @@ def process_images(

 print(prompt)

-if defaults.general.optimized:
-modelCS.to(defaults.general.gpu)
+if st.session_state['defaults'].general.optimized:
+modelCS.to(st.session_state['defaults'].general.gpu)

-uc = (st.session_state["model"] if not defaults.general.optimized else modelCS).get_learned_conditioning(len(prompts) * [""])
+uc = (st.session_state["model"] if not st.session_state['defaults'].general.optimized else modelCS).get_learned_conditioning(len(prompts) * [""])

 if isinstance(prompts, tuple):
 prompts = list(prompts)
@@ -1338,14 +1338,14 @@ def process_images(
 c = torch.zeros_like(uc) # i dont know if this is correct.. but it works
 for i in range(0, len(weighted_subprompts)):
 # note if alpha negative, it functions same as torch.sub
-c = torch.add(c, (st.session_state["model"] if not defaults.general.optimized else modelCS).get_learned_conditioning(weighted_subprompts[i][0]), alpha=weighted_subprompts[i][1])
+c = torch.add(c, (st.session_state["model"] if not st.session_state['defaults'].general.optimized else modelCS).get_learned_conditioning(weighted_subprompts[i][0]), alpha=weighted_subprompts[i][1])
 else: # just behave like usual
-c = (st.session_state["model"] if not defaults.general.optimized else modelCS).get_learned_conditioning(prompts)
+c = (st.session_state["model"] if not st.session_state['defaults'].general.optimized else modelCS).get_learned_conditioning(prompts)


 shape = [opt_C, height // opt_f, width // opt_f]

-if defaults.general.optimized:
+if st.session_state['defaults'].general.optimized:
 mem = torch.cuda.memory_allocated()/1e6
 modelCS.to("cpu")
 while(torch.cuda.memory_allocated()/1e6 >= mem):
@@ -1371,14 +1371,14 @@ def process_images(
 torch.manual_seed(specified_variant_seed)
 seeds = [specified_variant_seed]
 # finally, slerp base_x noise to target_x noise for creating a variant
-x = slerp(defaults.general.gpu, max(0.0, min(1.0, variant_amount)), base_x, x)
+x = slerp(st.session_state['defaults'].general.gpu, max(0.0, min(1.0, variant_amount)), base_x, x)

 samples_ddim = func_sample(init_data=init_data, x=x, conditioning=c, unconditional_conditioning=uc, sampler_name=sampler_name)

-if defaults.general.optimized:
-modelFS.to(defaults.general.gpu)
+if st.session_state['defaults'].general.optimized:
+modelFS.to(st.session_state['defaults'].general.gpu)

-x_samples_ddim = (st.session_state["model"] if not defaults.general.optimized else modelFS).decode_first_stage(samples_ddim)
+x_samples_ddim = (st.session_state["model"] if not st.session_state['defaults'].general.optimized else modelFS).decode_first_stage(samples_ddim)
 x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)

 for i, x_sample in enumerate(x_samples_ddim):
@@ -1510,7 +1510,7 @@ def process_images(
 #if simple_templating:
 #grid_captions.append( captions[i] )

-if defaults.general.optimized:
+if st.session_state['defaults'].general.optimized:
 mem = torch.cuda.memory_allocated()/1e6
 modelFS.to("cpu")
 while(torch.cuda.memory_allocated()/1e6 >= mem):
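Note that sd_utils now reads st.session_state["defaults"] at module import time (for example the grid_format, save_format, and CUDA_VISIBLE_DEVICES lines above), so the session state must be populated before "from sd_utils import *" runs. A sketch of the implied ordering, with an assumed config path used only for illustration:

    import streamlit as st
    from omegaconf import OmegaConf

    # Must happen first, in the entry-point script (path is an assumption):
    if "defaults" not in st.session_state:
        st.session_state["defaults"] = OmegaConf.load("configs/webui/webui_streamlit.yaml")

    # Only now is it safe to import sd_utils, whose top-level statements
    # dereference st.session_state["defaults"] as they execute.
    from sd_utils import *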
@@ -36,12 +36,12 @@ class plugin_info():
 displayPriority = 1


-if os.path.exists(os.path.join(defaults.general.GFPGAN_dir, "experiments", "pretrained_models", "GFPGANv1.3.pth")):
+if os.path.exists(os.path.join(st.session_state['defaults'].general.GFPGAN_dir, "experiments", "pretrained_models", "GFPGANv1.3.pth")):
 GFPGAN_available = True
 else:
 GFPGAN_available = False

-if os.path.exists(os.path.join(defaults.general.RealESRGAN_dir, "experiments","pretrained_models", f"{defaults.general.RealESRGAN_model}.pth")):
+if os.path.exists(os.path.join(st.session_state['defaults'].general.RealESRGAN_dir, "experiments","pretrained_models", f"{st.session_state['defaults'].general.RealESRGAN_model}.pth")):
 RealESRGAN_available = True
 else:
 RealESRGAN_available = False
@@ -55,7 +55,7 @@ def txt2img(prompt: str, ddim_steps: int, sampler_name: str, realesrgan_model_na
 RealESRGAN_model: str = "RealESRGAN_x4plus_anime_6B", fp = None, variant_amount: float = None,
 variant_seed: int = None, ddim_eta:float = 0.0, write_info_files:bool = True):

-outpath = defaults.general.outdir_txt2img or defaults.general.outdir or "outputs/txt2img-samples"
+outpath = st.session_state['defaults'].general.outdir_txt2img or st.session_state['defaults'].general.outdir or "outputs/txt2img-samples"

 seed = seed_to_int(seed)

@@ -94,7 +94,7 @@ def txt2img(prompt: str, ddim_steps: int, sampler_name: str, realesrgan_model_na
 def sample(init_data, x, conditioning, unconditional_conditioning, sampler_name):
 samples_ddim, _ = sampler.sample(S=ddim_steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=cfg_scale,
 unconditional_conditioning=unconditional_conditioning, eta=ddim_eta, x_T=x, img_callback=generation_callback,
-log_every_t=int(defaults.general.update_preview_frequency))
+log_every_t=int(st.session_state['defaults'].general.update_preview_frequency))

 return samples_ddim

@@ -157,23 +157,23 @@ def layout():
 col1, col2, col3 = st.columns([1,2,1], gap="large")

 with col1:
-width = st.slider("Width:", min_value=64, max_value=1024, value=defaults.txt2img.width, step=64)
-height = st.slider("Height:", min_value=64, max_value=1024, value=defaults.txt2img.height, step=64)
-cfg_scale = st.slider("CFG (Classifier Free Guidance Scale):", min_value=1.0, max_value=30.0, value=defaults.txt2img.cfg_scale, step=0.5, help="How strongly the image should follow the prompt.")
-seed = st.text_input("Seed:", value=defaults.txt2img.seed, help=" The seed to use, if left blank a random seed will be generated.")
-batch_count = st.slider("Batch count.", min_value=1, max_value=100, value=defaults.txt2img.batch_count, step=1, help="How many iterations or batches of images to generate in total.")
+width = st.slider("Width:", min_value=64, max_value=1024, value=st.session_state['defaults'].txt2img.width, step=64)
+height = st.slider("Height:", min_value=64, max_value=1024, value=st.session_state['defaults'].txt2img.height, step=64)
+cfg_scale = st.slider("CFG (Classifier Free Guidance Scale):", min_value=1.0, max_value=30.0, value=st.session_state['defaults'].txt2img.cfg_scale, step=0.5, help="How strongly the image should follow the prompt.")
+seed = st.text_input("Seed:", value=st.session_state['defaults'].txt2img.seed, help=" The seed to use, if left blank a random seed will be generated.")
+batch_count = st.slider("Batch count.", min_value=1, max_value=100, value=st.session_state['defaults'].txt2img.batch_count, step=1, help="How many iterations or batches of images to generate in total.")
 #batch_size = st.slider("Batch size", min_value=1, max_value=250, value=defaults.txt2img.batch_size, step=1,
 #help="How many images are at once in a batch.\
 #It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it takes to finish generation as more images are generated at once.\
 #Default: 1")

 with st.expander("Preview Settings"):
-st.session_state["update_preview"] = st.checkbox("Update Image Preview", value=defaults.txt2img.update_preview,
+st.session_state["update_preview"] = st.checkbox("Update Image Preview", value=st.session_state['defaults'].txt2img.update_preview,
 help="If enabled the image preview will be updated during the generation instead of at the end. \
 You can use the Update Preview \Frequency option bellow to customize how frequent it's updated. \
 By default this is enabled and the frequency is set to 1 step.")

-st.session_state["update_preview_frequency"] = st.text_input("Update Image Preview Frequency", value=defaults.txt2img.update_preview_frequency,
+st.session_state["update_preview_frequency"] = st.text_input("Update Image Preview Frequency", value=st.session_state['defaults'].txt2img.update_preview_frequency,
 help="Frequency in steps at which the the preview image is updated. By default the frequency \
 is set to 1 step.")

@@ -198,11 +198,11 @@ def layout():
 message = st.empty()

 with col3:
-st.session_state.sampling_steps = st.slider("Sampling Steps", value=defaults.txt2img.sampling_steps, min_value=1, max_value=250)
+st.session_state.sampling_steps = st.slider("Sampling Steps", value=st.session_state['defaults'].txt2img.sampling_steps, min_value=1, max_value=250)

 sampler_name_list = ["k_lms", "k_euler", "k_euler_a", "k_dpm_2", "k_dpm_2_a", "k_heun", "PLMS", "DDIM"]
 sampler_name = st.selectbox("Sampling method", sampler_name_list,
-index=sampler_name_list.index(defaults.txt2img.default_sampler), help="Sampling method to use. Default: k_euler")
+index=sampler_name_list.index(st.session_state['defaults'].txt2img.default_sampler), help="Sampling method to use. Default: k_euler")



@@ -223,19 +223,19 @@ def layout():
 save_as_jpg = st.checkbox("Save samples as jpg", value=False, help="Saves the images as jpg instead of png.")

 if GFPGAN_available:
-use_GFPGAN = st.checkbox("Use GFPGAN", value=defaults.txt2img.use_GFPGAN, help="Uses the GFPGAN model to improve faces after the generation. This greatly improve the quality and consistency of faces but uses extra VRAM. Disable if you need the extra VRAM.")
+use_GFPGAN = st.checkbox("Use GFPGAN", value=st.session_state['defaults'].txt2img.use_GFPGAN, help="Uses the GFPGAN model to improve faces after the generation. This greatly improve the quality and consistency of faces but uses extra VRAM. Disable if you need the extra VRAM.")
 else:
 use_GFPGAN = False

 if RealESRGAN_available:
-use_RealESRGAN = st.checkbox("Use RealESRGAN", value=defaults.txt2img.use_RealESRGAN, help="Uses the RealESRGAN model to upscale the images after the generation. This greatly improve the quality and lets you have high resolution images but uses extra VRAM. Disable if you need the extra VRAM.")
+use_RealESRGAN = st.checkbox("Use RealESRGAN", value=st.session_state['defaults'].txt2img.use_RealESRGAN, help="Uses the RealESRGAN model to upscale the images after the generation. This greatly improve the quality and lets you have high resolution images but uses extra VRAM. Disable if you need the extra VRAM.")
 RealESRGAN_model = st.selectbox("RealESRGAN model", ["RealESRGAN_x4plus", "RealESRGAN_x4plus_anime_6B"], index=0)
 else:
 use_RealESRGAN = False
 RealESRGAN_model = "RealESRGAN_x4plus"

-variant_amount = st.slider("Variant Amount:", value=defaults.txt2img.variant_amount, min_value=0.0, max_value=1.0, step=0.01)
-variant_seed = st.text_input("Variant Seed:", value=defaults.txt2img.seed, help="The seed to use when generating a variant, if left blank a random seed will be generated.")
+variant_amount = st.slider("Variant Amount:", value=st.session_state['defaults'].txt2img.variant_amount, min_value=0.0, max_value=1.0, step=0.01)
+variant_seed = st.text_input("Variant Seed:", value=st.session_state['defaults'].txt2img.seed, help="The seed to use when generating a variant, if left blank a random seed will be generated.")
 galleryCont = st.empty()

 if generate_button:
|
@ -44,12 +44,12 @@ class plugin_info():
|
||||
displayPriority = 1
|
||||
|
||||
|
||||
if os.path.exists(os.path.join(defaults.general.GFPGAN_dir, "experiments", "pretrained_models", "GFPGANv1.3.pth")):
|
||||
if os.path.exists(os.path.join(st.session_state['defaults'].general.GFPGAN_dir, "experiments", "pretrained_models", "GFPGANv1.3.pth")):
|
||||
GFPGAN_available = True
|
||||
else:
|
||||
GFPGAN_available = False
|
||||
|
||||
if os.path.exists(os.path.join(defaults.general.RealESRGAN_dir, "experiments","pretrained_models", f"{defaults.general.RealESRGAN_model}.pth")):
|
||||
if os.path.exists(os.path.join(st.session_state['defaults'].general.RealESRGAN_dir, "experiments","pretrained_models", f"{st.session_state['defaults'].general.RealESRGAN_model}.pth")):
|
||||
RealESRGAN_available = True
|
||||
else:
|
||||
RealESRGAN_available = False
|
||||
@ -128,14 +128,14 @@ def diffuse(
|
||||
|
||||
#print (st.session_state["update_preview_frequency"])
|
||||
#update the preview image if it is enabled and the frequency matches the step_counter
|
||||
if defaults.general.update_preview:
|
||||
if st.session_state['defaults'].general.update_preview:
|
||||
step_counter += 1
|
||||
|
||||
if st.session_state.dynamic_preview_frequency:
|
||||
current_chunk_speed, previous_chunk_speed, defaults.general.update_preview_frequency = optimize_update_preview_frequency(
|
||||
current_chunk_speed, previous_chunk_speed, defaults.general.update_preview_frequency)
|
||||
current_chunk_speed, previous_chunk_speed, st.session_state['defaults'].general.update_preview_frequency = optimize_update_preview_frequency(
|
||||
current_chunk_speed, previous_chunk_speed, st.session_state['defaults'].general.update_preview_frequency)
|
||||
|
||||
if defaults.general.update_preview_frequency == step_counter or step_counter == st.session_state.sampling_steps:
|
||||
if st.session_state['defaults'].general.update_preview_frequency == step_counter or step_counter == st.session_state.sampling_steps:
|
||||
#scale and decode the image latents with vae
|
||||
cond_latents_2 = 1 / 0.18215 * cond_latents
|
||||
image_2 = pipe.vae.decode(cond_latents_2)
|
||||
@ -193,9 +193,9 @@ def txt2vid(
|
||||
# --------------------------------------
|
||||
# args you probably want to change
|
||||
prompts = ["blueberry spaghetti", "strawberry spaghetti"], # prompt to dream about
|
||||
gpu:int = defaults.general.gpu, # id of the gpu to run on
|
||||
gpu:int = st.session_state['defaults'].general.gpu, # id of the gpu to run on
|
||||
#name:str = 'test', # name of this project, for the output directory
|
||||
#rootdir:str = defaults.general.outdir,
|
||||
#rootdir:str = st.session_state['defaults'].general.outdir,
|
||||
num_steps:int = 200, # number of steps between each pair of sampled points
|
||||
max_frames:int = 10000, # number of frames to write and then exit the script
|
||||
num_inference_steps:int = 50, # more (e.g. 100, 200 etc) can create slightly better images
|
||||
@ -217,9 +217,9 @@ def txt2vid(
|
||||
):
|
||||
"""
|
||||
prompt = ["blueberry spaghetti", "strawberry spaghetti"], # prompt to dream about
|
||||
gpu:int = defaults.general.gpu, # id of the gpu to run on
|
||||
gpu:int = st.session_state['defaults'].general.gpu, # id of the gpu to run on
|
||||
#name:str = 'test', # name of this project, for the output directory
|
||||
#rootdir:str = defaults.general.outdir,
|
||||
#rootdir:str = st.session_state['defaults'].general.outdir,
|
||||
num_steps:int = 200, # number of steps between each pair of sampled points
|
||||
max_frames:int = 10000, # number of frames to write and then exit the script
|
||||
num_inference_steps:int = 50, # more (e.g. 100, 200 etc) can create slightly better images
|
||||
@ -256,11 +256,11 @@ def txt2vid(
|
||||
# init the output dir
|
||||
sanitized_prompt = slugify(prompts)
|
||||
|
||||
full_path = os.path.join(os.getcwd(), defaults.general.outdir, "txt2vid-samples", "samples", sanitized_prompt)
|
||||
full_path = os.path.join(os.getcwd(), st.session_state['defaults'].general.outdir, "txt2vid-samples", "samples", sanitized_prompt)
|
||||
|
||||
if len(full_path) > 220:
|
||||
sanitized_prompt = sanitized_prompt[:220-len(full_path)]
|
||||
full_path = os.path.join(os.getcwd(), defaults.general.outdir, "txt2vid-samples", "samples", sanitized_prompt)
|
||||
full_path = os.path.join(os.getcwd(), st.session_state['defaults'].general.outdir, "txt2vid-samples", "samples", sanitized_prompt)
|
||||
|
||||
os.makedirs(full_path, exist_ok=True)
|
||||
|
||||
@ -338,8 +338,8 @@ def txt2vid(
|
||||
weights_path,
|
||||
use_local_file=True,
|
||||
use_auth_token=True,
|
||||
#torch_dtype=torch.float16 if not defaults.general.no_half else None,
|
||||
revision="fp16" if not defaults.general.no_half else None
|
||||
#torch_dtype=torch.float16 if not st.session_state['defaults'].general.no_half else None,
|
||||
revision="fp16" if not st.session_state['defaults'].general.no_half else None
|
||||
)
|
||||
|
||||
st.session_state["pipe"].unet.to(torch_device)
|
||||
@ -358,8 +358,8 @@ def txt2vid(
|
||||
weights_path,
|
||||
use_local_file=True,
|
||||
use_auth_token=True,
|
||||
#torch_dtype=torch.float16 if not defaults.general.no_half else None,
|
||||
revision="fp16" if not defaults.general.no_half else None
|
||||
#torch_dtype=torch.float16 if not st.session_state['defaults'].general.no_half else None,
|
||||
revision="fp16" if not st.session_state['defaults'].general.no_half else None
|
||||
)
|
||||
|
||||
st.session_state["pipe"].unet.to(torch_device)
|
||||
@ -449,9 +449,9 @@ def txt2vid(
|
||||
if st.session_state['save_video']:
|
||||
# write video to memory
|
||||
#output = io.BytesIO()
|
||||
#writer = imageio.get_writer(os.path.join(os.getcwd(), defaults.general.outdir, "txt2vid-samples"), im, extension=".mp4", fps=30)
|
||||
#writer = imageio.get_writer(os.path.join(os.getcwd(), st.session_state['defaults'].general.outdir, "txt2vid-samples"), im, extension=".mp4", fps=30)
|
||||
try:
|
||||
video_path = os.path.join(os.getcwd(), defaults.general.outdir, "txt2vid-samples","temp.mp4")
|
||||
video_path = os.path.join(os.getcwd(), st.session_state['defaults'].general.outdir, "txt2vid-samples","temp.mp4")
|
||||
writer = imageio.get_writer(video_path, fps=24)
|
||||
for frame in frames:
|
||||
writer.append_data(frame)
|
||||
@ -493,25 +493,25 @@ def layout():
|
||||
col1, col2, col3 = st.columns([1,2,1], gap="large")
|
||||
|
||||
with col1:
|
||||
width = st.slider("Width:", min_value=64, max_value=2048, value=defaults.txt2vid.width, step=64)
|
||||
height = st.slider("Height:", min_value=64, max_value=2048, value=defaults.txt2vid.height, step=64)
|
||||
cfg_scale = st.slider("CFG (Classifier Free Guidance Scale):", min_value=1.0, max_value=30.0, value=defaults.txt2vid.cfg_scale, step=0.5, help="How strongly the image should follow the prompt.")
|
||||
seed = st.text_input("Seed:", value=defaults.txt2vid.seed, help=" The seed to use, if left blank a random seed will be generated.")
|
||||
#batch_count = st.slider("Batch count.", min_value=1, max_value=100, value=defaults.txt2vid.batch_count, step=1, help="How many iterations or batches of images to generate in total.")
|
||||
#batch_size = st.slider("Batch size", min_value=1, max_value=250, value=defaults.txt2vid.batch_size, step=1,
|
||||
width = st.slider("Width:", min_value=64, max_value=2048, value=st.session_state['defaults'].txt2vid.width, step=64)
|
||||
height = st.slider("Height:", min_value=64, max_value=2048, value=st.session_state['defaults'].txt2vid.height, step=64)
|
||||
cfg_scale = st.slider("CFG (Classifier Free Guidance Scale):", min_value=1.0, max_value=30.0, value=st.session_state['defaults'].txt2vid.cfg_scale, step=0.5, help="How strongly the image should follow the prompt.")
|
||||
seed = st.text_input("Seed:", value=st.session_state['defaults'].txt2vid.seed, help=" The seed to use, if left blank a random seed will be generated.")
|
||||
#batch_count = st.slider("Batch count.", min_value=1, max_value=100, value=st.session_state['defaults'].txt2vid.batch_count, step=1, help="How many iterations or batches of images to generate in total.")
|
||||
#batch_size = st.slider("Batch size", min_value=1, max_value=250, value=st.session_state['defaults'].txt2vid.batch_size, step=1,
|
||||
#help="How many images are at once in a batch.\
|
||||
#It increases the VRAM usage a lot but if you have enough VRAM it can reduce the time it takes to finish generation as more images are generated at once.\
|
||||
#Default: 1")
|
||||
|
||||
st.session_state["max_frames"] = int(st.text_input("Max Frames:", value=defaults.txt2vid.max_frames, help="Specify the max number of frames you want to generate."))
|
||||
st.session_state["max_frames"] = int(st.text_input("Max Frames:", value=st.session_state['defaults'].txt2vid.max_frames, help="Specify the max number of frames you want to generate."))
|
||||
|
||||
with st.expander("Preview Settings"):
|
||||
st.session_state["update_preview"] = st.checkbox("Update Image Preview", value=defaults.txt2vid.update_preview,
|
||||
st.session_state["update_preview"] = st.checkbox("Update Image Preview", value=st.session_state['defaults'].txt2vid.update_preview,
|
||||
help="If enabled the image preview will be updated during the generation instead of at the end. \
|
||||
You can use the Update Preview \Frequency option bellow to customize how frequent it's updated. \
|
||||
By default this is enabled and the frequency is set to 1 step.")
|
||||
|
||||
st.session_state["update_preview_frequency"] = st.text_input("Update Image Preview Frequency", value=defaults.txt2vid.update_preview_frequency,
|
||||
st.session_state["update_preview_frequency"] = st.text_input("Update Image Preview Frequency", value=st.session_state['defaults'].txt2vid.update_preview_frequency,
|
||||
help="Frequency in steps at which the the preview image is updated. By default the frequency \
|
||||
is set to 1 step.")
with col2:
@@ -544,8 +544,8 @@ def layout():
# If we have custom models available on the "models/custom"
#folder then we show a menu to select which model we want to use, otherwise we use the main model for SD
#if CustomModel_available:
custom_model = st.selectbox("Custom Model:", defaults.txt2vid.custom_models_list,
index=defaults.txt2vid.custom_models_list.index(defaults.txt2vid.default_model),
custom_model = st.selectbox("Custom Model:", st.session_state['defaults'].txt2vid.custom_models_list,
index=st.session_state['defaults'].txt2vid.custom_models_list.index(st.session_state['defaults'].txt2vid.default_model),
help="Select the model you want to use. This option is only available if you have custom models \
on your 'models/custom' folder. The model name that will be shown here is the same as the name\
the file for the model has on said folder, it is recommended to give the .ckpt file a name that \
@@ -556,21 +556,21 @@ def layout():
#custom_model = "CompVis/stable-diffusion-v1-4"
#st.session_state["weights_path"] = f"CompVis/{slugify(custom_model.lower())}"

st.session_state.sampling_steps = st.slider("Sampling Steps", value=defaults.txt2vid.sampling_steps, min_value=10, step=10, max_value=500,
st.session_state.sampling_steps = st.slider("Sampling Steps", value=st.session_state['defaults'].txt2vid.sampling_steps, min_value=10, step=10, max_value=500,
help="Number of steps between each pair of sampled points")
st.session_state.num_inference_steps = st.slider("Inference Steps:", value=defaults.txt2vid.num_inference_steps, min_value=10, step=10, max_value=500,
st.session_state.num_inference_steps = st.slider("Inference Steps:", value=st.session_state['defaults'].txt2vid.num_inference_steps, min_value=10, step=10, max_value=500,
help="Higher values (e.g. 100, 200 etc) can create better images.")

#sampler_name_list = ["k_lms", "k_euler", "k_euler_a", "k_dpm_2", "k_dpm_2_a", "k_heun", "PLMS", "DDIM"]
#sampler_name = st.selectbox("Sampling method", sampler_name_list,
#index=sampler_name_list.index(defaults.txt2vid.default_sampler), help="Sampling method to use. Default: k_euler")
#index=sampler_name_list.index(st.session_state['defaults'].txt2vid.default_sampler), help="Sampling method to use. Default: k_euler")
scheduler_name_list = ["klms", "ddim"]
scheduler_name = st.selectbox("Scheduler:", scheduler_name_list,
index=scheduler_name_list.index(defaults.txt2vid.scheduler_name), help="Scheduler to use. Default: klms")
index=scheduler_name_list.index(st.session_state['defaults'].txt2vid.scheduler_name), help="Scheduler to use. Default: klms")

beta_scheduler_type_list = ["scaled_linear", "linear"]
beta_scheduler_type = st.selectbox("Beta Schedule Type:", beta_scheduler_type_list,
index=beta_scheduler_type_list.index(defaults.txt2vid.beta_scheduler_type), help="Schedule Type to use. Default: linear")
index=beta_scheduler_type_list.index(st.session_state['defaults'].txt2vid.beta_scheduler_type), help="Schedule Type to use. Default: linear")
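
The txt2vid path is diffusers-based, so the "klms"/"ddim" choice and the beta settings plausibly map onto scheduler classes along these lines (a sketch, not this file's actual wiring):

from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler

def make_scheduler(name, beta_start, beta_end, beta_schedule):
    # "klms" -> k-LMS sampling; anything else falls back to DDIM here.
    if name == "klms":
        return LMSDiscreteScheduler(beta_start=beta_start, beta_end=beta_end,
                                    beta_schedule=beta_schedule)
    return DDIMScheduler(beta_start=beta_start, beta_end=beta_end,
                         beta_schedule=beta_schedule, clip_sample=False,
                         set_alpha_to_one=False)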


#basic_tab, advanced_tab = st.tabs(["Basic", "Advanced"])
@@ -580,41 +580,41 @@ def layout():
#help="Press the Enter key to submit, when 'No' is selected you can use the Enter key to write multiple lines.")

with st.expander("Advanced"):
st.session_state["separate_prompts"] = st.checkbox("Create Prompt Matrix.", value=defaults.txt2vid.separate_prompts,
st.session_state["separate_prompts"] = st.checkbox("Create Prompt Matrix.", value=st.session_state['defaults'].txt2vid.separate_prompts,
help="Separate multiple prompts using the `|` character, and get all combinations of them.")
st.session_state["normalize_prompt_weights"] = st.checkbox("Normalize Prompt Weights.",
value=defaults.txt2vid.normalize_prompt_weights, help="Ensure the sum of all weights adds up to 1.0")
value=st.session_state['defaults'].txt2vid.normalize_prompt_weights, help="Ensure the sum of all weights adds up to 1.0")
st.session_state["save_individual_images"] = st.checkbox("Save individual images.",
value=defaults.txt2vid.save_individual_images, help="Save each image generated before any filter or enhancement is applied.")
st.session_state["save_video"] = st.checkbox("Save video", value=defaults.txt2vid.save_video, help="Save a video with all the images generated as frames at the end of the generation.")
st.session_state["group_by_prompt"] = st.checkbox("Group results by prompt", value=defaults.txt2vid.group_by_prompt,
value=st.session_state['defaults'].txt2vid.save_individual_images, help="Save each image generated before any filter or enhancement is applied.")
st.session_state["save_video"] = st.checkbox("Save video", value=st.session_state['defaults'].txt2vid.save_video, help="Save a video with all the images generated as frames at the end of the generation.")
st.session_state["group_by_prompt"] = st.checkbox("Group results by prompt", value=st.session_state['defaults'].txt2vid.group_by_prompt,
help="Saves all the images with the same prompt into the same folder. When using a prompt matrix each prompt combination will have its own folder.")
st.session_state["write_info_files"] = st.checkbox("Write Info file", value=defaults.txt2vid.write_info_files,
st.session_state["write_info_files"] = st.checkbox("Write Info file", value=st.session_state['defaults'].txt2vid.write_info_files,
help="Save a file next to the image with information about the generation.")
st.session_state["dynamic_preview_frequency"] = st.checkbox("Dynamic Preview Frequency", value=defaults.txt2vid.dynamic_preview_frequency,
st.session_state["dynamic_preview_frequency"] = st.checkbox("Dynamic Preview Frequency", value=st.session_state['defaults'].txt2vid.dynamic_preview_frequency,
help="This option tries to find the best value at which we can update \
the preview image during generation while minimizing the impact it has on performance. Default: True")
st.session_state["do_loop"] = st.checkbox("Do Loop", value=defaults.txt2vid.do_loop,
st.session_state["do_loop"] = st.checkbox("Do Loop", value=st.session_state['defaults'].txt2vid.do_loop,
help="Do loop")
st.session_state["save_as_jpg"] = st.checkbox("Save samples as jpg", value=defaults.txt2vid.save_as_jpg, help="Saves the images as jpg instead of png.")
st.session_state["save_as_jpg"] = st.checkbox("Save samples as jpg", value=st.session_state['defaults'].txt2vid.save_as_jpg, help="Saves the images as jpg instead of png.")

if GFPGAN_available:
st.session_state["use_GFPGAN"] = st.checkbox("Use GFPGAN", value=defaults.txt2vid.use_GFPGAN, help="Uses the GFPGAN model to improve faces after the generation. This greatly improves the quality and consistency of faces but uses extra VRAM. Disable if you need the extra VRAM.")
st.session_state["use_GFPGAN"] = st.checkbox("Use GFPGAN", value=st.session_state['defaults'].txt2vid.use_GFPGAN, help="Uses the GFPGAN model to improve faces after the generation. This greatly improves the quality and consistency of faces but uses extra VRAM. Disable if you need the extra VRAM.")
else:
st.session_state["use_GFPGAN"] = False

if RealESRGAN_available:
st.session_state["use_RealESRGAN"] = st.checkbox("Use RealESRGAN", value=defaults.txt2vid.use_RealESRGAN,
st.session_state["use_RealESRGAN"] = st.checkbox("Use RealESRGAN", value=st.session_state['defaults'].txt2vid.use_RealESRGAN,
help="Uses the RealESRGAN model to upscale the images after the generation. This greatly improves the quality and lets you have high resolution images but uses extra VRAM. Disable if you need the extra VRAM.")
st.session_state["RealESRGAN_model"] = st.selectbox("RealESRGAN model", ["RealESRGAN_x4plus", "RealESRGAN_x4plus_anime_6B"], index=0)
else:
st.session_state["use_RealESRGAN"] = False
st.session_state["RealESRGAN_model"] = "RealESRGAN_x4plus"

st.session_state["variant_amount"] = st.slider("Variant Amount:", value=defaults.txt2vid.variant_amount, min_value=0.0, max_value=1.0, step=0.01)
st.session_state["variant_seed"] = st.text_input("Variant Seed:", value=defaults.txt2vid.seed, help="The seed to use when generating a variant, if left blank a random seed will be generated.")
st.session_state["beta_start"] = st.slider("Beta Start:", value=defaults.txt2vid.beta_start, min_value=0.0001, max_value=0.03, step=0.0001, format="%.4f")
st.session_state["beta_end"] = st.slider("Beta End:", value=defaults.txt2vid.beta_end, min_value=0.0001, max_value=0.03, step=0.0001, format="%.4f")
st.session_state["variant_amount"] = st.slider("Variant Amount:", value=st.session_state['defaults'].txt2vid.variant_amount, min_value=0.0, max_value=1.0, step=0.01)
st.session_state["variant_seed"] = st.text_input("Variant Seed:", value=st.session_state['defaults'].txt2vid.seed, help="The seed to use when generating a variant, if left blank a random seed will be generated.")
st.session_state["beta_start"] = st.slider("Beta Start:", value=st.session_state['defaults'].txt2vid.beta_start, min_value=0.0001, max_value=0.03, step=0.0001, format="%.4f")
st.session_state["beta_end"] = st.slider("Beta End:", value=st.session_state['defaults'].txt2vid.beta_end, min_value=0.0001, max_value=0.03, step=0.0001, format="%.4f")

if generate_button:
#print("Loading models")
@@ -622,7 +622,7 @@ def layout():
#load_models(False, False, False, RealESRGAN_model, CustomModel_available=CustomModel_available, custom_model=custom_model)

# run video generation
image, seed, info, stats = txt2vid(prompts=prompt, gpu=defaults.general.gpu,
image, seed, info, stats = txt2vid(prompts=prompt, gpu=st.session_state['defaults'].general.gpu,
num_steps=st.session_state.sampling_steps, max_frames=int(st.session_state.max_frames),
num_inference_steps=st.session_state.num_inference_steps,
cfg_scale=cfg_scale, do_loop=st.session_state["do_loop"],

@@ -22,20 +22,14 @@ except:
# remove some annoying deprecation warnings that show every now and then.
warnings.filterwarnings("ignore", category=DeprecationWarning)

st.session_state["defaults"] = OmegaConf.load(os.path.join("configs", "webui", "webui_streamlit.yaml"))

if (os.path.exists(os.path.join("configs", "webui", "userconfig_streamlit.yaml"))):
user_defaults = OmegaConf.load(os.path.join("configs", "webui", "userconfig_streamlit.yaml"))
st.session_state["defaults"] = OmegaConf.load("configs/webui/webui_streamlit.yaml")
if (os.path.exists("configs/webui/userconfig_streamlit.yaml")):
user_defaults = OmegaConf.load("configs/webui/userconfig_streamlit.yaml")
st.session_state["defaults"] = OmegaConf.merge(st.session_state["defaults"], user_defaults)

defaults = st.session_state["defaults"]
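
This is the core of the commit: the shipped defaults are loaded first and an optional user config is merged over them. Reduced to its essentials:

import os
from omegaconf import OmegaConf

defaults = OmegaConf.load("configs/webui/webui_streamlit.yaml")
if os.path.exists("configs/webui/userconfig_streamlit.yaml"):
    user = OmegaConf.load("configs/webui/userconfig_streamlit.yaml")
    # OmegaConf.merge is right-biased: user values override shipped ones.
    defaults = OmegaConf.merge(defaults, user)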

# this should force GFPGAN and RealESRGAN onto the selected gpu as well
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = str(defaults.general.gpu)
os.environ["CUDA_VISIBLE_DEVICES"] = str(st.session_state["defaults"].general.gpu)

# functions to load css locally OR remotely start here. Options exist for future flexibility. Called as st.markdown with unsafe_allow_html as css injection
# TODO, maybe look into async loading the file especially for remote fetching
@@ -61,12 +55,12 @@ def layout():
load_css(True, 'frontend/css/streamlit.main.css')

# check if the models exist in their respective folders
if os.path.exists(os.path.join(defaults.general.GFPGAN_dir, "experiments", "pretrained_models", "GFPGANv1.3.pth")):
if os.path.exists(os.path.join(st.session_state["defaults"].general.GFPGAN_dir, "experiments", "pretrained_models", "GFPGANv1.3.pth")):
st.session_state["GFPGAN_available"] = True
else:
st.session_state["GFPGAN_available"] = False

if os.path.exists(os.path.join(defaults.general.RealESRGAN_dir, "experiments", "pretrained_models", f"{defaults.general.RealESRGAN_model}.pth")):
if os.path.exists(os.path.join(st.session_state["defaults"].general.RealESRGAN_dir, "experiments", "pretrained_models", f"{st.session_state['defaults'].general.RealESRGAN_model}.pth")):
st.session_state["RealESRGAN_available"] = True
else:
st.session_state["RealESRGAN_available"] = False