Mirror of https://github.com/Sygil-Dev/sygil-webui.git (synced 2024-12-14 22:13:41 +03:00)
Added option to the Settings page to enable and disable the suggestion box.
This commit is contained in: parent 3e87802c9f, commit 3868b79b07
@@ -57,6 +57,7 @@ general:
     n_rows: -1
     no_verify_input: False
     show_percent_in_tab_title: True
+    enable_suggestions: True
     no_half: False
     use_float16: False
     precision: "autocast"
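Elsewhere in the diff this flag is read as st.session_state["defaults"].general.enable_suggestions, which implies the YAML above is parsed into an attribute-style config object. A minimal sketch of that lookup, assuming OmegaConf and the configs/webui/webui_streamlit.yaml path (both are assumptions; neither is shown in this commit):

# Sketch only: OmegaConf and the config path are assumptions, not part of this commit.
import streamlit as st
from omegaconf import OmegaConf

if "defaults" not in st.session_state:
    st.session_state["defaults"] = OmegaConf.load("configs/webui/webui_streamlit.yaml")

if st.session_state["defaults"].general.enable_suggestions:
    st.write("Suggestion box is enabled")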
@@ -214,6 +214,9 @@ def layout():
                               "This is useful in case you need to know how the generation is going while doign something else"
                               "in another tab on your browser. Default: True")

+            st.session_state["defaults"].general.enable_suggestions = st.checkbox("Enable Suggestions Box", value=st.session_state['defaults'].general.enable_suggestions,
+                                                                                  help="Adds a suggestion box under the prompt when clicked. Default: True")
+
             st.session_state["defaults"].daisi_app.running_on_daisi_io = st.checkbox("Running on Daisi.io?", value=st.session_state['defaults'].daisi_app.running_on_daisi_io,
                                                                                      help="Specify if we are running on app.Daisi.io . Default: False")

@@ -379,6 +379,9 @@ def layout():
                 #prompt = st.text_area("Input Text","")
                 placeholder = "A corgi wearing a top hat as an oil painting."
                 prompt = st.text_area("Input Text","", placeholder=placeholder, height=54)

+                if "defaults" in st.session_state:
+                    if st.session_state["defaults"].general.enable_suggestions:
+                        sygil_suggestions.suggestion_area(placeholder)

                 if "defaults" in st.session_state:
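sygil_suggestions.suggestion_area(placeholder) is defined elsewhere in the repository and is not part of this commit. Purely as a hypothetical illustration of what such a helper could do, the sketch below renders a row of keyword buttons that extend a prompt kept in session state; the real module may work differently.

# Hypothetical sketch only; not the actual sygil_suggestions implementation.
import streamlit as st

KEYWORDS = ["oil painting", "highly detailed", "concept art", "4k"]

def suggestion_area(placeholder: str) -> None:
    # One button per keyword; clicking appends the keyword to the stored prompt.
    with st.expander("Suggestions", expanded=True):
        cols = st.columns(len(KEYWORDS))
        for col, word in zip(cols, KEYWORDS):
            if col.button(word):
                current = st.session_state.get("suggested_prompt", placeholder)
                st.session_state["suggested_prompt"] = f"{current}, {word}"

suggestion_area("A corgi wearing a top hat as an oil painting.")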
@@ -109,6 +109,9 @@ try:
 except:
     pass

+# disable diffusers telemetry
+os.environ["DISABLE_TELEMETRY"] = "YES"
+
 # remove some annoying deprecation warnings that show every now and then.
 warnings.filterwarnings("ignore", category=DeprecationWarning)
 warnings.filterwarnings("ignore", category=UserWarning)

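Environment-variable opt-outs like DISABLE_TELEMETRY may be cached when the Hugging Face libraries are imported, so it is safest to set the variable before diffusers (or huggingface_hub) is imported; a minimal sketch of the intended ordering, with the diffusers import shown only to illustrate placement:

import os
import warnings

# Opt out of telemetry before any library that reads the variable is imported;
# setting it after the import may have no effect.
os.environ["DISABLE_TELEMETRY"] = "YES"

# Silence recurring deprecation/user warnings from dependencies.
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)

from diffusers import StableDiffusionPipeline  # imported only after the environment is set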
@@ -427,6 +427,9 @@ def layout():
                 #prompt = st.text_area("Input Text","")
                 placeholder = "A corgi wearing a top hat as an oil painting."
                 prompt = st.text_area("Input Text","", placeholder=placeholder, height=54)

+                if "defaults" in st.session_state:
+                    if st.session_state["defaults"].general.enable_suggestions:
+                        sygil_suggestions.suggestion_area(placeholder)

                 if "defaults" in st.session_state:
@@ -1133,7 +1133,7 @@ def load_diffusers_model(weights_path,torch_device):
         if not os.path.exists(model_path + "/model_index.json"):
             server_state["pipe"] = StableDiffusionPipeline.from_pretrained(
                 weights_path,
-                use_local_file=True,
+                #use_local_file=True,
                 use_auth_token=st.session_state["defaults"].general.huggingface_token,
                 torch_dtype=torch.float16 if st.session_state['defaults'].general.use_float16 else None,
                 revision="fp16" if not st.session_state['defaults'].general.no_half else None,
@@ -1146,7 +1146,7 @@ def load_diffusers_model(weights_path,torch_device):
         else:
             server_state["pipe"] = StableDiffusionPipeline.from_pretrained(
                 model_path,
-                use_local_file=True,
+                #use_local_file=True,
                 torch_dtype=torch.float16 if st.session_state['defaults'].general.use_float16 else None,
                 revision="fp16" if not st.session_state['defaults'].general.no_half else None,
                 safety_checker=None, # Very important for videos...lots of false positives while interpolating
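use_local_file is not an argument that StableDiffusionPipeline.from_pretrained documents (the supported flag for forcing offline loads is local_files_only), which is presumably why it is commented out above. A rough, self-contained sketch of an equivalent fp16 load using only documented arguments; the model id, token lookup, and device handling are placeholders rather than the webui's actual values:

import os
import torch
from diffusers import StableDiffusionPipeline

# Placeholders; in the webui these come from st.session_state["defaults"].
model_id = "CompVis/stable-diffusion-v1-4"   # or a local model_path
hf_token = os.environ.get("HUGGING_FACE_HUB_TOKEN")

pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16,      # half precision, as when use_float16 is True
    revision="fp16",                # fp16 weights branch, as when no_half is False
    use_auth_token=hf_token,
    local_files_only=False,         # documented replacement for the commented-out use_local_file
    safety_checker=None,            # disabled above to avoid false positives on video frames
)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")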
@@ -1182,6 +1182,7 @@ def load_diffusers_model(weights_path,torch_device):
         server_state['no_half'] = st.session_state['defaults'].general.no_half
         server_state['optimized'] = st.session_state['defaults'].general.optimized

+        #with no_rerun:
         load_diffusers_model(weights_path, torch_device)
     else:
         logger.info("Tx2Vid Model already Loaded")
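The surrounding code caches the loaded pipeline in server_state (presumably the shared, rerun-surviving state object from the streamlit-server-state package the project uses) and skips reloading when it is already present, which is why the else branch only logs "Tx2Vid Model already Loaded". A rough sketch of that load-once pattern, with a plain dict standing in for server_state and a stub loader in place of load_diffusers_model:

# Rough sketch of the load-once pattern; a dict stands in for server_state and
# load_pipe is a stub for the webui's load_diffusers_model().
server_state = {}

def load_pipe(weights_path):
    return f"pipeline loaded from {weights_path}"  # stub

def get_pipe(weights_path, no_half, optimized):
    settings = (weights_path, no_half, optimized)
    if "pipe" not in server_state or server_state.get("settings") != settings:
        server_state["pipe"] = load_pipe(weights_path)
        server_state["settings"] = settings
    else:
        print("Tx2Vid Model already Loaded")  # mirrors the logger message above
    return server_state["pipe"]

get_pipe("models/stable-diffusion-v1", no_half=False, optimized=False)  # placeholder path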
@@ -1387,6 +1388,7 @@ def txt2vid(
         #flaxpndms=flaxpndms_scheduler,
     )

+    with no_rerun:
         with st.session_state["progress_bar_text"].container():
             with hc.HyLoader('Loading Models...', hc.Loaders.standard_loaders,index=[0]):
                 load_diffusers_model(weights_path, torch_device)
@@ -1632,6 +1634,9 @@ def layout():
                 #prompt = st.text_area("Input Text","")
                 placeholder = "A corgi wearing a top hat as an oil painting."
                 prompt = st.text_area("Input Text","", placeholder=placeholder, height=54)

+                if "defaults" in st.session_state:
+                    if st.session_state["defaults"].general.enable_suggestions:
+                        sygil_suggestions.suggestion_area(placeholder)

                 if "defaults" in st.session_state:
@@ -1915,7 +1920,7 @@ def layout():
             #print("Loading models")
             # load the models when we hit the generate button for the first time, it wont be loaded after that so dont worry.
             #load_models(False, st.session_state["use_GFPGAN"], True, st.session_state["RealESRGAN_model"])
-            with no_rerun:
+            #with no_rerun:
             if st.session_state["use_GFPGAN"]:
                 if "GFPGAN" in server_state:
                     logger.info("GFPGAN already loaded")