KeyError: 'pipe'

hlky 2022-10-05 02:38:40 +01:00
parent b650c162e6
commit cd700692eb
No known key found for this signature in database
GPG Key ID: 55A99F1E80D907D5
2 changed files with 14 additions and 19 deletions

File 1 of 2 (YAML config)

@@ -127,7 +127,7 @@ txt2img:
 txt2vid:
     default_model: "CompVis/stable-diffusion-v1-4"
-    custom_models_list: ["CompVis/stable-diffusion-v1-4", "hakurei/waifu-diffusion"]
+    custom_models_list: ["CompVis/stable-diffusion-v1-4"]
     prompt:
     width:
         value: 512
@@ -205,7 +205,7 @@ txt2vid:
         step: 0.0001
         format: "%.5f"
-    beta_scheduler_type: "linear"
+    beta_scheduler_type: "scaled_linear"
     max_frames: 100
 LDSR_config:
@@ -310,8 +310,8 @@ gfpgan:
     strength: 100
 textual_inversion:
-    pretrained_model_name_or_path: "models/ldm/stable-diffusion-v1-4"
-    tokenizer_name: ""
+    pretrained_model_name_or_path: "models/diffusers/stable-diffusion-v1-4"
+    tokenizer_name: "models/clip-vit-large-patch14"
 daisi_app:
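
The three config hunks above drop the hardcoded waifu-diffusion entry, switch the beta schedule to "scaled_linear", and point textual inversion at the diffusers-format weights and a local CLIP tokenizer. A minimal sketch of checking the edited values, assuming the file is plain YAML at configs/webui/webui_streamlit.yaml (the repo's actual loader may differ, e.g. OmegaConf):

import yaml

# Load the edited config and print the three values this commit changes.
with open("configs/webui/webui_streamlit.yaml") as f:  # assumed path
    cfg = yaml.safe_load(f)

print(cfg["txt2vid"]["custom_models_list"])        # ["CompVis/stable-diffusion-v1-4"]
print(cfg["txt2vid"]["beta_scheduler_type"])       # "scaled_linear"
print(cfg["textual_inversion"]["tokenizer_name"])  # "models/clip-vit-large-patch14"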

File 2 of 2 (Python, txt2vid script)

@@ -208,7 +208,6 @@ def diffuse(
     return image2
 #
-@st.experimental_singleton(show_spinner=False, suppress_st_warning=True)
 def load_diffusers_model(weights_path,torch_device):
     with server_state_lock["model"]:
         if "model" in server_state:
@@ -219,7 +218,7 @@ def load_diffusers_model(weights_path,torch_device):
     try:
         with server_state_lock["pipe"]:
-            if not "pipe" in st.session_state or st.session_state["weights_path"] != weights_path:
+            if "pipe" not in server_state:
                 if ("weights_path" in st.session_state) and st.session_state["weights_path"] != weights_path:
                     del st.session_state["weights_path"]
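
This is the core fix: the guard keyed on the per-session st.session_state could disagree with the cross-session server_state, leaving server_state without a "pipe" entry when the later server_state["pipe"].scheduler assignment runs, which is the KeyError: 'pipe' in the commit title. The new guard checks the same store the pipeline is saved into. A minimal sketch of the resulting pattern, with build_pipe as a hypothetical loader:

from streamlit_server_state import server_state, server_state_lock

def ensure_pipe(weights_path, torch_device):
    # Check and store against the same cross-session dict; the lock makes
    # the first-session load atomic across concurrent sessions.
    with server_state_lock["pipe"]:
        if "pipe" not in server_state:
            server_state["pipe"] = build_pipe(weights_path, torch_device)  # hypothetical
    return server_state["pipe"]  # later sessions reuse the cached pipeline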
@@ -227,8 +226,6 @@ def load_diffusers_model(weights_path,torch_device):
                 # if folder "models/diffusers/stable-diffusion-v1-4" exists, load the model from there
                 if weights_path == "CompVis/stable-diffusion-v1-4":
                     model_path = os.path.join("models", "diffusers", "stable-diffusion-v1-4")
-                elif weights_path == "hakurei/waifu-diffusion":
-                    model_path = os.path.join("models", "diffusers", "waifu-diffusion")
                 if not os.path.exists(model_path + "/model_index.json"):
                     server_state["pipe"] = StableDiffusionPipeline.from_pretrained(
@@ -392,20 +389,18 @@ def txt2vid(
     SCHEDULERS = dict(default=default_scheduler, ddim=ddim_scheduler, klms=klms_scheduler)
-    # ------------------------------------------------------------------------------
-    #st.session_state["progress_bar_text"].text("Loading models...")
-    with st.session_state["progress_bar_text"].container():
-        with hc.HyLoader('Loading Models...', hc.Loaders.standard_loaders,index=[0]):
-            try:
-                if "model" in st.session_state:
-                    del st.session_state["model"]
-            except:
-                pass
-            #print (st.session_state["weights_path"] != weights_path)
-            load_diffusers_model(weights_path, torch_device)
-    if "pipe" not in server_state:
-        print('wtf')
+    if "pipe" not in server_state:
+        with st.session_state["progress_bar_text"].container():
+            with hc.HyLoader('Loading Models...', hc.Loaders.standard_loaders,index=[0]):
+                if "model" in st.session_state:
+                    del st.session_state["model"]
+                load_diffusers_model(weights_path, torch_device)
+    else:
+        print("Model already loaded")
     server_state["pipe"].scheduler = SCHEDULERS[scheduler]
     server_state["pipe"].use_multiprocessing_for_evaluation = False