Mirror of https://github.com/Sygil-Dev/sygil-webui.git, synced 2024-12-14 14:05:36 +03:00
Added an option to the Settings page to enable and disable the suggestion box.
This commit is contained in:
parent 3e87802c9f
commit 3868b79b07
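In short, the commit threads one new boolean through the stack: an enable_suggestions key under the general: section of the config, a matching checkbox on the Settings page, and a guard around each sygil_suggestions.suggestion_area() call in the affected layout() functions so the box only renders when the option is on. A few unrelated cleanups ride along: diffusers telemetry is disabled, an apparently unsupported use_local_file argument is commented out, and the txt2vid model loading is wrapped in no_rerun.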
@@ -57,6 +57,7 @@ general:
     n_rows: -1
     no_verify_input: False
     show_percent_in_tab_title: True
+    enable_suggestions: True
     no_half: False
     use_float16: False
     precision: "autocast"
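The YAML side is one line. At runtime the webui exposes these settings as st.session_state["defaults"].general.enable_suggestions, the attribute-style access you get from an OmegaConf config. A minimal sketch of that round trip, assuming OmegaConf and the usual config path (both assumptions, not shown in this diff):

    from omegaconf import OmegaConf

    # Load the settings file; the path is assumed, not part of this diff.
    defaults = OmegaConf.load("configs/webui/webui_streamlit.yaml")

    # DictConfig allows attribute-style access, matching the code below.
    if defaults.general.enable_suggestions:
        print("suggestion box enabled")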
@@ -213,6 +213,9 @@ def layout():
                 help="Add the progress percent value to the page title on the tab on your browser. "
                      "This is useful in case you need to know how the generation is going while doing something else "
                      "in another tab on your browser. Default: True")
 
+            st.session_state["defaults"].general.enable_suggestions = st.checkbox("Enable Suggestions Box", value=st.session_state['defaults'].general.enable_suggestions,
+                                                                                  help="Adds a suggestion box under the prompt when clicked. Default: True")
+
             st.session_state["defaults"].daisi_app.running_on_daisi_io = st.checkbox("Running on Daisi.io?", value=st.session_state['defaults'].daisi_app.running_on_daisi_io,
                                                                                      help="Specify if we are running on app.Daisi.io. Default: False")
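The new checkbox follows the pattern of the surrounding settings: st.checkbox is seeded from the stored value, and its return value is written straight back into st.session_state["defaults"], so the toggle takes effect for the rest of the session without editing the YAML. A standalone sketch of that round trip (hypothetical key name, not the repo's code):

    import streamlit as st

    # Seed once, then let the widget persist the user's choice.
    if "enable_suggestions" not in st.session_state:
        st.session_state["enable_suggestions"] = True  # config default

    st.session_state["enable_suggestions"] = st.checkbox(
        "Enable Suggestions Box",
        value=st.session_state["enable_suggestions"],
        help="Adds a suggestion box under the prompt when clicked. Default: True",
    )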
@@ -379,7 +379,10 @@ def layout():
                 #prompt = st.text_area("Input Text","")
                 placeholder = "A corgi wearing a top hat as an oil painting."
                 prompt = st.text_area("Input Text","", placeholder=placeholder, height=54)
-                sygil_suggestions.suggestion_area(placeholder)
+
+                if "defaults" in st.session_state:
+                    if st.session_state["defaults"].general.enable_suggestions:
+                        sygil_suggestions.suggestion_area(placeholder)
 
                 if "defaults" in st.session_state:
                     if st.session_state['defaults'].admin.global_negative_prompt:
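Note the two-step guard: "defaults" in st.session_state is checked first so the very first script run, before the config object exists, does not raise a KeyError; only then is the new flag consulted. The same three lines recur in every layout() below, so they could be folded into a helper, for example (hypothetical, not part of the commit; assumes the module's existing st and sygil_suggestions imports):

    def maybe_show_suggestions(placeholder: str) -> None:
        # Render the suggestion box only when config is loaded and enabled.
        defaults = st.session_state.get("defaults")
        if defaults is not None and defaults.general.enable_suggestions:
            sygil_suggestions.suggestion_area(placeholder)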
@@ -109,6 +109,9 @@ try:
 except:
     pass
 
+# disable diffusers telemetry
+os.environ["DISABLE_TELEMETRY"] = "YES"
+
 # remove some annoying deprecation warnings that show every now and then.
 warnings.filterwarnings("ignore", category=DeprecationWarning)
 warnings.filterwarnings("ignore", category=UserWarning)
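Environment toggles like this are read by the Hugging Face libraries, so the assignment sits at module import time, before any pipeline is constructed. A sketch of the ordering that matters (HF_HUB_DISABLE_TELEMETRY is a related flag on newer huggingface_hub releases; treat it as an assumption):

    import os

    # Set these before the libraries that read them are imported or used.
    os.environ["DISABLE_TELEMETRY"] = "YES"       # the flag this commit uses
    os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"  # assumption: newer hub flag

    import diffusers  # imported only after the environment is prepared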
@@ -427,7 +427,10 @@ def layout():
                 #prompt = st.text_area("Input Text","")
                 placeholder = "A corgi wearing a top hat as an oil painting."
                 prompt = st.text_area("Input Text","", placeholder=placeholder, height=54)
-                sygil_suggestions.suggestion_area(placeholder)
+
+                if "defaults" in st.session_state:
+                    if st.session_state["defaults"].general.enable_suggestions:
+                        sygil_suggestions.suggestion_area(placeholder)
 
                 if "defaults" in st.session_state:
                     if st.session_state['defaults'].admin.global_negative_prompt:
@@ -942,12 +942,12 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
 def diffuse(
         pipe,
         cond_embeddings, # text conditioning, should be (1, 77, 768)
-        cond_latents, # image conditioning, should be (1, 4, 64, 64)
-        num_inference_steps,
-        cfg_scale,
-        eta,
-        fps=30
-):
+        cond_latents,    # image conditioning, should be (1, 4, 64, 64)
+        num_inference_steps,
+        cfg_scale,
+        eta,
+        fps=30
+        ):
 
     torch_device = cond_latents.get_device()
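The shapes in the signature comments decode as follows: (1, 77, 768) is a single CLIP ViT-L/14 text embedding (77 tokens, 768 features per token), and (1, 4, 64, 64) is a single Stable Diffusion latent for a 512x512 image, since the VAE downsamples each spatial dimension by a factor of 8 and the UNet works on 4 latent channels:

    # Sanity check for the conditioning shapes named in diffuse().
    height, width = 512, 512
    latent_shape = (1, 4, height // 8, width // 8)  # VAE factor 8 -> (1, 4, 64, 64)
    text_shape = (1, 77, 768)                       # tokens x embedding dim
    assert latent_shape == (1, 4, 64, 64)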
@@ -1133,7 +1133,7 @@ def load_diffusers_model(weights_path,torch_device):
         if not os.path.exists(model_path + "/model_index.json"):
             server_state["pipe"] = StableDiffusionPipeline.from_pretrained(
                 weights_path,
-                use_local_file=True,
+                #use_local_file=True,
                 use_auth_token=st.session_state["defaults"].general.huggingface_token,
                 torch_dtype=torch.float16 if st.session_state['defaults'].general.use_float16 else None,
                 revision="fp16" if not st.session_state['defaults'].general.no_half else None,
@@ -1146,7 +1146,7 @@ def load_diffusers_model(weights_path,torch_device):
         else:
             server_state["pipe"] = StableDiffusionPipeline.from_pretrained(
                 model_path,
-                use_local_file=True,
+                #use_local_file=True,
                 torch_dtype=torch.float16 if st.session_state['defaults'].general.use_float16 else None,
                 revision="fp16" if not st.session_state['defaults'].general.no_half else None,
                 safety_checker=None, # Very important for videos...lots of false positives while interpolating
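use_local_file does not appear to be a keyword that StableDiffusionPipeline.from_pretrained() accepts, which would explain why both call sites comment it out. The documented diffusers switch for offline loading is local_files_only; a minimal sketch (model path assumed):

    from diffusers import StableDiffusionPipeline

    model_path = "path/to/local/model"  # stands in for the variable above

    # local_files_only=True loads from the local directory/cache and never
    # touches the network, which seems to be what use_local_file intended.
    pipe = StableDiffusionPipeline.from_pretrained(model_path, local_files_only=True)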
@@ -1181,7 +1181,8 @@ def load_diffusers_model(weights_path,torch_device):
             server_state['float16'] = st.session_state['defaults'].general.use_float16
             server_state['no_half'] = st.session_state['defaults'].general.no_half
             server_state['optimized'] = st.session_state['defaults'].general.optimized
 
+            #with no_rerun:
             load_diffusers_model(weights_path, torch_device)
         else:
             logger.info("Tx2Vid Model already Loaded")
@@ -1321,28 +1322,28 @@ def txt2vid(
     with open(os.path.join(full_path , f'{slugify(str(seeds))}_config.json' if len(prompts) > 1 else "prompts_config.json"), "w") as outfile:
         outfile.write(json.dumps(
             dict(
-                    prompts = prompts,
-                    gpu = gpu,
-                    num_steps = num_steps,
-                    max_duration_in_seconds = max_duration_in_seconds,
-                    num_inference_steps = num_inference_steps,
-                    cfg_scale = cfg_scale,
-                    do_loop = do_loop,
-                    use_lerp_for_text = use_lerp_for_text,
-                    seeds = seeds,
-                    quality = quality,
-                    eta = eta,
-                    width = width,
-                    height = height,
-                    weights_path = weights_path,
-                    scheduler=scheduler,
-                    disable_tqdm = disable_tqdm,
-                    beta_start = beta_start,
-                    beta_end = beta_end,
-                    beta_schedule = beta_schedule
-                ),
-                indent=2,
-                sort_keys=False,
+                prompts = prompts,
+                gpu = gpu,
+                num_steps = num_steps,
+                max_duration_in_seconds = max_duration_in_seconds,
+                num_inference_steps = num_inference_steps,
+                cfg_scale = cfg_scale,
+                do_loop = do_loop,
+                use_lerp_for_text = use_lerp_for_text,
+                seeds = seeds,
+                quality = quality,
+                eta = eta,
+                width = width,
+                height = height,
+                weights_path = weights_path,
+                scheduler=scheduler,
+                disable_tqdm = disable_tqdm,
+                beta_start = beta_start,
+                beta_end = beta_end,
+                beta_schedule = beta_schedule
+            ),
+            indent=2,
+            sort_keys=False,
         ))
 
     #print(scheduler)
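Dumping the full parameter set next to the output makes a run reproducible: every knob that shaped the video lands in a *_config.json beside the frames. Reading one back is the mirror image:

    import json

    # Sketch: reload a saved run configuration written by the block above.
    with open("prompts_config.json") as f:
        run_cfg = json.load(f)
    print(run_cfg["prompts"], run_cfg["cfg_scale"], run_cfg["num_inference_steps"])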
@@ -1386,10 +1387,11 @@ def txt2vid(
         #flaxddpms=flaxddpms_scheduler,
         #flaxpndms=flaxpndms_scheduler,
     )
 
-    with st.session_state["progress_bar_text"].container():
-        with hc.HyLoader('Loading Models...', hc.Loaders.standard_loaders,index=[0]):
-            load_diffusers_model(weights_path, torch_device)
+    with no_rerun:
+        with st.session_state["progress_bar_text"].container():
+            with hc.HyLoader('Loading Models...', hc.Loaders.standard_loaders,index=[0]):
+                load_diffusers_model(weights_path, torch_device)
 
     if "pipe" not in server_state:
         logger.error('wtf')
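Here the model load is actually wrapped in no_rerun, while the earlier call site only gains it as a comment. Assuming no_rerun is the context manager from the streamlit-server-state package, the point is that writes to server_state made during the load do not fan out reruns to every connected session while the heavy work is still in progress; a sketch of the intent:

    from streamlit_server_state import no_rerun, server_state

    def load_pipeline_somehow():
        ...  # hypothetical placeholder for the heavy model load above

    with no_rerun:
        # server_state mutations inside this block don't trigger reruns
        # in other sessions until the block exits.
        server_state["pipe"] = load_pipeline_somehow()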
@@ -1632,7 +1634,10 @@ def layout():
                 #prompt = st.text_area("Input Text","")
                 placeholder = "A corgi wearing a top hat as an oil painting."
                 prompt = st.text_area("Input Text","", placeholder=placeholder, height=54)
-                sygil_suggestions.suggestion_area(placeholder)
+
+                if "defaults" in st.session_state:
+                    if st.session_state["defaults"].general.enable_suggestions:
+                        sygil_suggestions.suggestion_area(placeholder)
 
                 if "defaults" in st.session_state:
                     if st.session_state['defaults'].admin.global_negative_prompt:
@@ -1915,25 +1920,25 @@ def layout():
                 #print("Loading models")
                 # load the models when we hit the generate button for the first time, it won't be loaded after that so don't worry.
                 #load_models(False, st.session_state["use_GFPGAN"], True, st.session_state["RealESRGAN_model"])
-                with no_rerun:
-                    if st.session_state["use_GFPGAN"]:
-                        if "GFPGAN" in server_state:
-                            logger.info("GFPGAN already loaded")
-                        else:
-                            with col2:
-                                with hc.HyLoader('Loading Models...', hc.Loaders.standard_loaders,index=[0]):
-                                    # Load GFPGAN
-                                    if os.path.exists(st.session_state["defaults"].general.GFPGAN_dir):
-                                        try:
-                                            load_GFPGAN()
-                                            logger.info("Loaded GFPGAN")
-                                        except Exception:
-                                            import traceback
-                                            logger.error("Error loading GFPGAN:", file=sys.stderr)
-                                            logger.error(traceback.format_exc(), file=sys.stderr)
+                #with no_rerun:
+                if st.session_state["use_GFPGAN"]:
+                    if "GFPGAN" in server_state:
+                        logger.info("GFPGAN already loaded")
+                    else:
+                        if "GFPGAN" in server_state:
+                            del server_state["GFPGAN"]
+                        with col2:
+                            with hc.HyLoader('Loading Models...', hc.Loaders.standard_loaders,index=[0]):
+                                # Load GFPGAN
+                                if os.path.exists(st.session_state["defaults"].general.GFPGAN_dir):
+                                    try:
+                                        load_GFPGAN()
+                                        logger.info("Loaded GFPGAN")
+                                    except Exception:
+                                        import traceback
+                                        logger.error("Error loading GFPGAN:", file=sys.stderr)
+                                        logger.error(traceback.format_exc(), file=sys.stderr)
                 else:
                     if "GFPGAN" in server_state:
                         del server_state["GFPGAN"]
 
                 #try:
                 # run video generation
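Two details stand out in the new branch. First, the reload path now drops any stale server_state["GFPGAN"] entry before loading, instead of keeping the old model in memory. Second, the file=sys.stderr keyword on the logger.error() calls is a leftover from print(); loggers write to their own handlers, and depending on the library that keyword is ignored or rejected. A cleaner equivalent for the except block (hypothetical rewrite, assuming a loguru or stdlib logging logger):

    from loguru import logger  # assumption: the webui logs via loguru

    def load_GFPGAN():
        raise RuntimeError("demo failure")  # hypothetical stand-in loader

    try:
        load_GFPGAN()
        logger.info("Loaded GFPGAN")
    except Exception:
        # .exception() records the message plus the active traceback,
        # replacing both logger.error(..., file=sys.stderr) calls above.
        logger.exception("Error loading GFPGAN")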