Mirror of https://github.com/sd-webui/stable-diffusion-webui.git
Fixed an issue with the new ldm folder requiring personalization_config to be set even if empty.
Added shutup as a dependency to shut up Python warnings for good.
This commit is contained in:
parent 5841dc85a6
commit 2e2b35ff71
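For context on the warning change: shutup's public API is a single call, shutup.please(), which effectively installs warning filters so that later Python warnings are suppressed process-wide. A minimal standalone sketch of that behaviour (assuming shutup==0.2.0 is installed; this is not the repo's code):

import warnings

import shutup

# One call at startup is enough: shutup.please() silences warnings globally.
shutup.please()

warnings.warn("this would normally print to stderr")  # now suppressed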
@@ -67,6 +67,7 @@ retry==0.9.2 # used by sd_utils
 python-slugify==6.1.2 # used by sd_utils
 piexif==1.1.3 # used by sd_utils
 pywebview==3.6.3 # used by streamlit_webview.py
+shutup==0.2.0 # remove all the annoying warnings
 
 accelerate==0.12.0
 albumentations==0.4.3
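With the pin in place, a quick sanity check that the installed version matches (a sketch, assuming Python 3.8+ for importlib.metadata):

import importlib.metadata

# Should print 0.2.0 once requirements are installed.
print(importlib.metadata.version("shutup"))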
@@ -72,6 +72,7 @@ from io import BytesIO
 from packaging import version
 from pathlib import Path
 from huggingface_hub import hf_hub_download
+import shutup
 
 #import librosa
 from logger import logger, set_logger_verbosity, quiesce_logger
@@ -91,6 +92,9 @@ except ImportError as e:
 # end of imports
 #---------------------------------------------------------------------------------------------------------------
 
+# remove all the annoying python warnings.
+shutup.please()
+
 try:
     # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
     from transformers import logging
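The try block above only shows the import; for reference, the usual transformers pattern for hiding the "Some weights of the model checkpoint were not used when initializing..." notice is to lower the library's log level. A sketch of that pattern (not necessarily the exact call this file makes after the import):

from transformers import logging

# After this, transformers only prints errors, hiding the checkpoint notice.
logging.set_verbosity_error()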
@@ -261,10 +265,13 @@ def set_page_title(title):
 
 
 def make_grid(n_items=5, n_cols=5):
+    # Compute number of rows
     n_rows = 1 + n_items // int(n_cols)
 
+    # Create rows
     rows = [st.container() for _ in range(n_rows)]
 
+    # Create columns in each row
     cols_per_row = [r.columns(n_cols) for r in rows]
     cols = [column for row in cols_per_row for column in row]
 
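A usage sketch for the commented helper above, assuming make_grid returns the flattened cols list (its return statement falls outside this hunk) and that images is a placeholder list of image paths or PIL images:

# Lay out eight thumbnails in a four-column Streamlit grid.
cols = make_grid(n_items=8, n_cols=4)
for col, img in zip(cols, images):
    col.image(img)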
@@ -272,29 +279,29 @@ def make_grid(n_items=5, n_cols=5):
 
 
 def merge(file1, file2, out, weight):
-    alpha = (weight)/100
     if not(file1.endswith(".ckpt")):
         file1 += ".ckpt"
     if not(file2.endswith(".ckpt")):
         file2 += ".ckpt"
     if not(out.endswith(".ckpt")):
         out += ".ckpt"
-    #Load Models
-    model_0 = torch.load(file1)
-    model_1 = torch.load(file2)
-    theta_0 = model_0['state_dict']
-    theta_1 = model_1['state_dict']
-
-    for key in theta_0.keys():
-        if 'model' in key and key in theta_1:
-            theta_0[key] = (alpha) * theta_0[key] + (1-alpha) * theta_1[key]
-
-    logger.info("RUNNING...\n(STAGE 2)")
-
-    for key in theta_1.keys():
-        if 'model' in key and key not in theta_0:
-            theta_0[key] = theta_1[key]
-    torch.save(model_0, out)
+    try:
+        #Load Models
+        model_0 = torch.load(file1)
+        model_1 = torch.load(file2)
+        theta_0 = model_0['state_dict']
+        theta_1 = model_1['state_dict']
+        alpha = (weight)/100
+        for key in theta_0.keys():
+            if 'model' in key and key in theta_1:
+                theta_0[key] = (alpha) * theta_0[key] + (1-alpha) * theta_1[key]
+        logger.info("RUNNING...\n(STAGE 2)")
+        for key in theta_1.keys():
+            if 'model' in key and key not in theta_0:
+                theta_0[key] = theta_1[key]
+        torch.save(model_0, out)
+    except:
+        logger.error("Error in merging")
 
 
 def human_readable_size(size, decimal_places=3):
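In isolation, the new body computes a weighted average of the 'model' tensors shared by both checkpoints, with weight given as the percentage kept from the first file, then copies over keys unique to the second. A standalone sketch of the same arithmetic (function name and plain-float state dicts are illustrative; the real code operates on torch tensors):

def weighted_merge(theta_0, theta_1, weight):
    # weight is a percentage: weight=70 keeps 70% of theta_0's values.
    alpha = weight / 100
    merged = dict(theta_0)
    for key, value in theta_0.items():
        if 'model' in key and key in theta_1:
            merged[key] = alpha * value + (1 - alpha) * theta_1[key]
    # Keys present only in the second checkpoint are copied unchanged.
    for key, value in theta_1.items():
        if 'model' in key and key not in theta_0:
            merged[key] = value
    return merged

# Worked check: 70% of 1.0 plus 30% of 0.0 is 0.7.
print(weighted_merge({'model.w': 1.0}, {'model.w': 0.0}, 70))  # {'model.w': 0.7}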
@@ -483,7 +490,7 @@ def load_model_from_config(config, ckpt, verbose=False):
     if "global_step" in pl_sd:
         logger.info(f"Global Step: {pl_sd['global_step']}")
     sd = pl_sd["state_dict"]
-    model = instantiate_from_config(config.model)
+    model = instantiate_from_config(config.model, personalization_config='')
     m, u = model.load_state_dict(sd, strict=False)
     if len(m) > 0 and verbose:
         logger.info("missing keys:")
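This is the fix named in the commit message: the new ldm folder's model class takes a personalization_config argument with no default, so instantiating it without one raises a TypeError, and passing an empty value satisfies the signature. A toy illustration of the failure mode (class name and signature invented for the example, not the repo's actual code):

class PersonalizableModel:
    # A required keyword with no default mirrors the new ldm signature.
    def __init__(self, personalization_config, **kwargs):
        self.personalization_config = personalization_config

# PersonalizableModel() alone would raise TypeError;
# an empty value is enough to get past the constructor.
model = PersonalizableModel(personalization_config='')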
@@ -2395,7 +2402,7 @@ def process_images(
         else: # just behave like usual
             c = (server_state["model"] if not st.session_state['defaults'].general.optimized else server_state["modelCS"]).get_learned_conditioning(prompts)
 
 
         shape = [opt_C, height // opt_f, width // opt_f]
 
         if st.session_state['defaults'].general.optimized:
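For readers unfamiliar with the optimized mode in the context lines above: the ternary routes text conditioning either to the full model or, when optimized is enabled, to the separate conditioning-stage module stored as "modelCS". A minimal sketch of the same dispatch with stand-in objects (the object names and stub return value are invented; only get_learned_conditioning comes from the diff):

class CondStageStub:
    # Stand-in for the model's get_learned_conditioning method, which
    # returns text embeddings for a batch of prompts in the real code.
    def get_learned_conditioning(self, prompts):
        return [f"embedding({p})" for p in prompts]

model = model_cs = CondStageStub()
optimized = True

# Same dispatch shape as the diff: optimized mode uses the smaller module.
c = (model_cs if optimized else model).get_learned_conditioning(["a cat"])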