diff --git a/requirements.txt b/requirements.txt
index 7a8e778..d0ac8c6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -67,6 +67,7 @@ retry==0.9.2 # used by sd_utils
 python-slugify==6.1.2 # used by sd_utils
 piexif==1.1.3 # used by sd_utils
 pywebview==3.6.3 # used by streamlit_webview.py
+shutup==0.2.0 # remove all the annoying warnings
 
 accelerate==0.12.0
 albumentations==0.4.3
diff --git a/scripts/sd_utils/__init__.py b/scripts/sd_utils/__init__.py
index c099d12..06d0fba 100644
--- a/scripts/sd_utils/__init__.py
+++ b/scripts/sd_utils/__init__.py
@@ -72,6 +72,7 @@ from io import BytesIO
 from packaging import version
 from pathlib import Path
 from huggingface_hub import hf_hub_download
+import shutup
 
 #import librosa
 from logger import logger, set_logger_verbosity, quiesce_logger
@@ -91,6 +92,9 @@ except ImportError as e:
 # end of imports
 #---------------------------------------------------------------------------------------------------------------
 
+# remove all the annoying python warnings.
+shutup.please()
+
 try:
     # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
     from transformers import logging
@@ -261,10 +265,13 @@ def set_page_title(title):
 
 def make_grid(n_items=5, n_cols=5):
+    # Compute number of rows
     n_rows = 1 + n_items // int(n_cols)
 
+    # Create rows
     rows = [st.container() for _ in range(n_rows)]
 
+    # Create columns in each row
     cols_per_row = [r.columns(n_cols) for r in rows]
     cols = [column for row in cols_per_row for column in row]
 
@@ -272,29 +279,29 @@ def make_grid(n_items=5, n_cols=5):
 
 def merge(file1, file2, out, weight):
-    alpha = (weight)/100
     if not(file1.endswith(".ckpt")):
         file1 += ".ckpt"
     if not(file2.endswith(".ckpt")):
         file2 += ".ckpt"
     if not(out.endswith(".ckpt")):
         out += ".ckpt"
-    #Load Models
-    model_0 = torch.load(file1)
-    model_1 = torch.load(file2)
-    theta_0 = model_0['state_dict']
-    theta_1 = model_1['state_dict']
-
-    for key in theta_0.keys():
-        if 'model' in key and key in theta_1:
-            theta_0[key] = (alpha) * theta_0[key] + (1-alpha) * theta_1[key]
-
-    logger.info("RUNNING...\n(STAGE 2)")
-
-    for key in theta_1.keys():
-        if 'model' in key and key not in theta_0:
-            theta_0[key] = theta_1[key]
-    torch.save(model_0, out)
+    try:
+        #Load Models
+        model_0 = torch.load(file1)
+        model_1 = torch.load(file2)
+        theta_0 = model_0['state_dict']
+        theta_1 = model_1['state_dict']
+        alpha = (weight)/100
+        for key in theta_0.keys():
+            if 'model' in key and key in theta_1:
+                theta_0[key] = (alpha) * theta_0[key] + (1-alpha) * theta_1[key]
+        logger.info("RUNNING...\n(STAGE 2)")
+        for key in theta_1.keys():
+            if 'model' in key and key not in theta_0:
+                theta_0[key] = theta_1[key]
+        torch.save(model_0, out)
+    except Exception as e:
+        logger.error(f"Error in merging: {e}")
 
 def human_readable_size(size, decimal_places=3):
@@ -483,7 +490,7 @@ def load_model_from_config(config, ckpt, verbose=False):
     if "global_step" in pl_sd:
         logger.info(f"Global Step: {pl_sd['global_step']}")
     sd = pl_sd["state_dict"]
-    model = instantiate_from_config(config.model)
+    model = instantiate_from_config(config.model, personalization_config='')
    m, u = model.load_state_dict(sd, strict=False)
     if len(m) > 0 and verbose:
         logger.info("missing keys:")
@@ -2395,7 +2402,7 @@ def process_images(
             else:
                 # just behave like usual
                 c = (server_state["model"] if not st.session_state['defaults'].general.optimized else server_state["modelCS"]).get_learned_conditioning(prompts)
-
+
             shape = [opt_C, height // opt_f, width // opt_f]
             if st.session_state['defaults'].general.optimized:
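
The shutup.please() call added above suppresses Python warnings process-wide. A quick standalone sketch of the behaviour this patch relies on (not part of the patch itself):

    import warnings
    import shutup

    shutup.please()  # silence all subsequent Python warnings
    warnings.warn("deprecated!")  # would normally print to stderr; now suppressed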
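For context on the newly commented make_grid(): it builds one Streamlit container per row, splits each into n_cols columns, and flattens the result into a single list of cells. A self-contained usage sketch (the 12-item loop is illustrative, not from the patch):

    import streamlit as st

    def make_grid(n_items=5, n_cols=5):
        # Same layout logic as the patched helper.
        n_rows = 1 + n_items // int(n_cols)
        rows = [st.container() for _ in range(n_rows)]
        cols_per_row = [r.columns(n_cols) for r in rows]
        return [column for row in cols_per_row for column in row]

    # Fill the grid cell by cell, left to right, top to bottom.
    items = [f"item {i}" for i in range(12)]
    for cell, item in zip(make_grid(len(items), 4), items):
        cell.write(item)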
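The restructured merge() linearly interpolates the two checkpoints' 'model' tensors, with weight given as a percentage favouring the first file. A minimal sketch of that interpolation on toy state dicts (names and values are made up, not real checkpoints):

    import torch

    def weighted_merge(theta_0, theta_1, weight):
        # weight is a percentage: 100 keeps theta_0, 0 keeps theta_1.
        alpha = weight / 100
        merged = {}
        for key, value in theta_0.items():
            if key in theta_1:
                # Linear interpolation between matching tensors.
                merged[key] = alpha * value + (1 - alpha) * theta_1[key]
            else:
                merged[key] = value
        # Keys present only in the second checkpoint are copied over.
        for key, value in theta_1.items():
            merged.setdefault(key, value)
        return merged

    a = {"model.w": torch.ones(2)}
    b = {"model.w": torch.zeros(2), "model.b": torch.ones(1)}
    print(weighted_merge(a, b, 75))  # model.w -> tensor([0.7500, 0.7500])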