Added an option to the Settings page to enable or disable the suggestion box.

ZeroCool940711 2022-12-14 19:35:14 -07:00
parent 3e87802c9f
commit 3868b79b07
No known key found for this signature in database
GPG Key ID: 4E4072992B5BC640
6 changed files with 74 additions and 56 deletions

View File

@@ -57,6 +57,7 @@ general:
     n_rows: -1
     no_verify_input: False
     show_percent_in_tab_title: True
+    enable_suggestions: True
     no_half: False
     use_float16: False
     precision: "autocast"
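The new enable_suggestions key defaults to True, so existing installs keep the suggestion box until they opt out in Settings. For orientation, here is a minimal sketch of how a YAML default like this ends up behind the st.session_state["defaults"].general.* attribute access used in the hunks below; the OmegaConf loader and the config path are assumptions about the webui's wiring, not part of this commit:

# sketch only: load the YAML once and cache it in session state so
# widgets can read and write it with attribute access
from omegaconf import OmegaConf
import streamlit as st

if "defaults" not in st.session_state:
    # assumed path; the repo keeps its defaults in a YAML file like this one
    st.session_state["defaults"] = OmegaConf.load("configs/webui/webui_streamlit.yaml")

st.session_state["defaults"].general.enable_suggestions  # -> True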

View File

@@ -213,6 +213,9 @@ def layout():
                                 help="Add the progress percent value to the page title on the tab on your browser. "
                                      "This is useful in case you need to know how the generation is going while doing something else "
                                      "in another tab on your browser. Default: True")
+
+            st.session_state["defaults"].general.enable_suggestions = st.checkbox("Enable Suggestions Box", value=st.session_state['defaults'].general.enable_suggestions,
+                                                                                  help="Adds a suggestion box under the prompt when clicked. Default: True")
 
             st.session_state["defaults"].daisi_app.running_on_daisi_io = st.checkbox("Running on Daisi.io?", value=st.session_state['defaults'].daisi_app.running_on_daisi_io,
                                                                                      help="Specify if we are running on app.Daisi.io. Default: False")

View File

@@ -379,7 +379,10 @@ def layout():
                 #prompt = st.text_area("Input Text","")
                 placeholder = "A corgi wearing a top hat as an oil painting."
                 prompt = st.text_area("Input Text","", placeholder=placeholder, height=54)
-                sygil_suggestions.suggestion_area(placeholder)
+
+                if "defaults" in st.session_state:
+                    if st.session_state["defaults"].general.enable_suggestions:
+                        sygil_suggestions.suggestion_area(placeholder)
 
                 if "defaults" in st.session_state:
                     if st.session_state['defaults'].admin.global_negative_prompt:
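The two-level guard matters: checking "defaults" in st.session_state first avoids a KeyError on an early rerun before the config has loaded, and only then is the feature flag consulted. An equivalent, slightly tighter form (a sketch, not code from this commit; .get with a default is an OmegaConf DictConfig method and assumes that config type):

# sketch: same guard collapsed into one conditional
defaults = st.session_state.get("defaults")
if defaults is not None and defaults.general.get("enable_suggestions", False):
    sygil_suggestions.suggestion_area(placeholder)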

View File

@@ -109,6 +109,9 @@ try:
 except:
     pass
 
+# disable diffusers telemetry
+os.environ["DISABLE_TELEMETRY"] = "YES"
+
 # remove some annoying deprecation warnings that show every now and then.
 warnings.filterwarnings("ignore", category=DeprecationWarning)
 warnings.filterwarnings("ignore", category=UserWarning)
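The environment variable only helps if it is set before diffusers or huggingface_hub issue any requests, which is why it sits at module import time in this utility file. huggingface_hub also honors the HF_HUB_DISABLE_TELEMETRY spelling; a sketch of the ordering:

# sketch: flags must be exported before the libraries are imported
import os

os.environ["DISABLE_TELEMETRY"] = "YES"
os.environ.setdefault("HF_HUB_DISABLE_TELEMETRY", "1")  # newer alias

import diffusers  # safe: telemetry flags are already in place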

View File

@@ -427,7 +427,10 @@ def layout():
                 #prompt = st.text_area("Input Text","")
                 placeholder = "A corgi wearing a top hat as an oil painting."
                 prompt = st.text_area("Input Text","", placeholder=placeholder, height=54)
-                sygil_suggestions.suggestion_area(placeholder)
+
+                if "defaults" in st.session_state:
+                    if st.session_state["defaults"].general.enable_suggestions:
+                        sygil_suggestions.suggestion_area(placeholder)
 
                 if "defaults" in st.session_state:
                     if st.session_state['defaults'].admin.global_negative_prompt:

View File

@@ -942,12 +942,12 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
 def diffuse(
         pipe,
         cond_embeddings,      # text conditioning, should be (1, 77, 768)
         cond_latents,         # image conditioning, should be (1, 4, 64, 64)
         num_inference_steps,
         cfg_scale,
         eta,
         fps=30
 ):
     torch_device = cond_latents.get_device()
@@ -1133,7 +1133,7 @@ def load_diffusers_model(weights_path, torch_device):
     if not os.path.exists(model_path + "/model_index.json"):
         server_state["pipe"] = StableDiffusionPipeline.from_pretrained(
             weights_path,
-            use_local_file=True,
+            #use_local_file=True,
             use_auth_token=st.session_state["defaults"].general.huggingface_token,
             torch_dtype=torch.float16 if st.session_state['defaults'].general.use_float16 else None,
             revision="fp16" if not st.session_state['defaults'].general.no_half else None,
@@ -1146,7 +1146,7 @@ def load_diffusers_model(weights_path, torch_device):
     else:
         server_state["pipe"] = StableDiffusionPipeline.from_pretrained(
             model_path,
-            use_local_file=True,
+            #use_local_file=True,
             torch_dtype=torch.float16 if st.session_state['defaults'].general.use_float16 else None,
             revision="fp16" if not st.session_state['defaults'].general.no_half else None,
             safety_checker=None,  # Very important for videos...lots of false positives while interpolating
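Commenting out use_local_file makes sense: it is not in the documented signature of from_pretrained, so depending on the diffusers version it was either ignored or rejected. The keyword diffusers actually supports for offline loading is local_files_only. A hedged sketch of the corrected call (the model path is a placeholder):

# sketch: offline-friendly load with the real diffusers keyword
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "path/or/repo-id",       # stands in for weights_path / model_path
    local_files_only=True,   # use the local cache, do not hit the Hub
    torch_dtype=torch.float16,
    revision="fp16",
    safety_checker=None,
)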
@@ -1181,7 +1181,8 @@ def load_diffusers_model(weights_path, torch_device):
         server_state['float16'] = st.session_state['defaults'].general.use_float16
         server_state['no_half'] = st.session_state['defaults'].general.no_half
         server_state['optimized'] = st.session_state['defaults'].general.optimized
 
+        #with no_rerun:
         load_diffusers_model(weights_path, torch_device)
     else:
         logger.info("Tx2Vid Model already Loaded")
@@ -1321,28 +1322,28 @@ def txt2vid(
     with open(os.path.join(full_path, f'{slugify(str(seeds))}_config.json' if len(prompts) > 1 else "prompts_config.json"), "w") as outfile:
         outfile.write(json.dumps(
             dict(
                 prompts = prompts,
                 gpu = gpu,
                 num_steps = num_steps,
                 max_duration_in_seconds = max_duration_in_seconds,
                 num_inference_steps = num_inference_steps,
                 cfg_scale = cfg_scale,
                 do_loop = do_loop,
                 use_lerp_for_text = use_lerp_for_text,
                 seeds = seeds,
                 quality = quality,
                 eta = eta,
                 width = width,
                 height = height,
                 weights_path = weights_path,
                 scheduler=scheduler,
                 disable_tqdm = disable_tqdm,
                 beta_start = beta_start,
                 beta_end = beta_end,
                 beta_schedule = beta_schedule
             ),
             indent=2,
             sort_keys=False,
         ))
     #print(scheduler)
@@ -1386,10 +1387,11 @@ def txt2vid(
         #flaxddpms=flaxddpms_scheduler,
         #flaxpndms=flaxpndms_scheduler,
     )
-    with st.session_state["progress_bar_text"].container():
-        with hc.HyLoader('Loading Models...', hc.Loaders.standard_loaders,index=[0]):
-            load_diffusers_model(weights_path, torch_device)
+    with no_rerun:
+        with st.session_state["progress_bar_text"].container():
+            with hc.HyLoader('Loading Models...', hc.Loaders.standard_loaders,index=[0]):
+                load_diffusers_model(weights_path, torch_device)
 
     if "pipe" not in server_state:
         logger.error('wtf')
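Wrapping the load in no_rerun is the substantive change here. Mutating server_state normally triggers a rerun in every connected session, so a multi-step model load could thrash the UI; no_rerun (a context manager from the streamlit-server-state package, assuming that is where this file imports it from) suppresses those reruns while the writes happen. A sketch:

# sketch: batch several shared-state writes without forcing reruns
from streamlit_server_state import server_state, no_rerun

with no_rerun:
    server_state["float16"] = True   # none of these writes...
    server_state["pipe"] = object()  # ...reruns other sessions mid-load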
@@ -1632,7 +1634,10 @@ def layout():
                 #prompt = st.text_area("Input Text","")
                 placeholder = "A corgi wearing a top hat as an oil painting."
                 prompt = st.text_area("Input Text","", placeholder=placeholder, height=54)
-                sygil_suggestions.suggestion_area(placeholder)
+
+                if "defaults" in st.session_state:
+                    if st.session_state["defaults"].general.enable_suggestions:
+                        sygil_suggestions.suggestion_area(placeholder)
 
                 if "defaults" in st.session_state:
                     if st.session_state['defaults'].admin.global_negative_prompt:
@@ -1915,25 +1920,25 @@ def layout():
                 #print("Loading models")
                 # load the models when we hit the generate button for the first time, it won't be loaded after that so don't worry.
                 #load_models(False, st.session_state["use_GFPGAN"], True, st.session_state["RealESRGAN_model"])
-                with no_rerun:
-                    if st.session_state["use_GFPGAN"]:
-                        if "GFPGAN" in server_state:
-                            logger.info("GFPGAN already loaded")
-                        else:
-                            with col2:
-                                with hc.HyLoader('Loading Models...', hc.Loaders.standard_loaders,index=[0]):
-                                    # Load GFPGAN
-                                    if os.path.exists(st.session_state["defaults"].general.GFPGAN_dir):
-                                        try:
-                                            load_GFPGAN()
-                                            logger.info("Loaded GFPGAN")
-                                        except Exception:
-                                            import traceback
-                                            logger.error("Error loading GFPGAN:", file=sys.stderr)
-                                            logger.error(traceback.format_exc(), file=sys.stderr)
-                    else:
-                        if "GFPGAN" in server_state:
-                            del server_state["GFPGAN"]
+                #with no_rerun:
+                if st.session_state["use_GFPGAN"]:
+                    if "GFPGAN" in server_state:
+                        logger.info("GFPGAN already loaded")
+                    else:
+                        with col2:
+                            with hc.HyLoader('Loading Models...', hc.Loaders.standard_loaders,index=[0]):
+                                # Load GFPGAN
+                                if os.path.exists(st.session_state["defaults"].general.GFPGAN_dir):
+                                    try:
+                                        load_GFPGAN()
+                                        logger.info("Loaded GFPGAN")
+                                    except Exception:
+                                        import traceback
+                                        logger.error("Error loading GFPGAN:", file=sys.stderr)
+                                        logger.error(traceback.format_exc(), file=sys.stderr)
+                else:
+                    if "GFPGAN" in server_state:
+                        del server_state["GFPGAN"]
 
                 #try:
                 # run video generation
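The GFPGAN block above is a load/unload toggle: the model is loaded lazily into server_state when the option is on, and its entry is deleted to free memory when the option goes off; the re-indentation is the only change, since the no_rerun wrapper was commented out. The idiom, reduced to a reusable skeleton (a sketch with a hypothetical helper name, not code from this commit):

# sketch: keep an optional model in server_state in step with its toggle
def sync_optional_model(name, enabled, loader):
    if enabled:
        if name not in server_state:
            server_state[name] = loader()  # load once, lazily
    elif name in server_state:
        del server_state[name]  # unload to reclaim memory

# usage mirroring the diff:
# sync_optional_model("GFPGAN", st.session_state["use_GFPGAN"], load_GFPGAN)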