From 302bdf0e09cc1a39d815bf8620844ac51bc505d3 Mon Sep 17 00:00:00 2001
From: Thomas Mello
Date: Sat, 1 Oct 2022 23:39:56 +0300
Subject: [PATCH 01/15] fix: gradio js (#1375)

---
 frontend/js/index.js | 196 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 196 insertions(+)
 create mode 100644 frontend/js/index.js

diff --git a/frontend/js/index.js b/frontend/js/index.js
new file mode 100644
index 0000000..4fc8d5e
--- /dev/null
+++ b/frontend/js/index.js
@@ -0,0 +1,196 @@
+window.SD = (() => {
+  /*
+   * Painterro is made a field of the SD global object
+   * To provide convenience when using w() method in css_and_js.py
+   */
+  class PainterroClass {
+    static isOpen = false;
+    static async init ({ x, toId }) {
+      console.log(x)
+
+      const originalImage = x[2] === 'Mask' ? x[1]?.image : x[0];
+
+      if (window.Painterro === undefined) {
+        try {
+          await this.load();
+        } catch (e) {
+          SDClass.error(e);
+
+          return this.fallback(originalImage);
+        }
+      }
+
+      if (this.isOpen) {
+        return this.fallback(originalImage);
+      }
+      this.isOpen = true;
+
+      let resolveResult;
+      const paintClient = Painterro({
+        hiddenTools: ['arrow'],
+        onHide: () => {
+          resolveResult?.(null);
+        },
+        saveHandler: (image, done) => {
+          const data = image.asDataURL();
+
+          // ensures stable performance even
+          // when the editor is in interactive mode
+          SD.clearImageInput(SD.el.get(`#${toId}`));
+
+          resolveResult(data);
+
+          done(true);
+          paintClient.hide();
+        },
+      });
+
+      const result = await new Promise((resolve) => {
+        resolveResult = resolve;
+        paintClient.show(originalImage);
+      });
+      this.isOpen = false;
+
+      return result ? this.success(result) : this.fallback(originalImage);
+    }
+    static success (result) { return [result, { image: result, mask: result }] };
+    static fallback (image) { return [image, { image: image, mask: image }] };
+    static load () {
+      return new Promise((resolve, reject) => {
+        const scriptId = '__painterro-script';
+        if (document.getElementById(scriptId)) {
+          reject(new Error('Tried to load painterro script, but script tag already exists.'));
+          return;
+        }
+
+        const styleId = '__painterro-css-override';
+        if (!document.getElementById(styleId)) {
+          /* Ensure Painterro window is always on top */
+          const style = document.createElement('style');
+          style.id = styleId;
+          style.setAttribute('type', 'text/css');
+          style.appendChild(document.createTextNode(`
+            .ptro-holder-wrapper {
+                z-index: 100;
+            }
+          `));
+          document.head.appendChild(style);
+        }
+
+        const script = document.createElement('script');
+        script.id = scriptId;
+        script.src = 'https://unpkg.com/painterro@1.2.78/build/painterro.min.js';
+        script.onload = () => resolve(true);
+        script.onerror = (e) => {
+          // remove self on error to enable reattempting load
+          document.head.removeChild(script);
+          reject(e);
+        };
+        document.head.appendChild(script);
+      });
+    }
+  }
+
+  /*
+   * Turns out caching elements doesn't actually work in gradio,
+   * as elements in tabs might get recreated
+   */
+  class ElementCache {
+    #el;
+    constructor () {
+      this.root = document.querySelector('gradio-app').shadowRoot;
+    }
+    get (selector) {
+      return this.root.querySelector(selector);
+    }
+  }
+
+  /*
+   * The main helper class to encapsulate functions
+   * that change gradio ui functionality
+   */
+  class SDClass {
+    el = new ElementCache();
+    Painterro = PainterroClass;
+    moveImageFromGallery ({ x, fromId, toId }) {
+      x = x[0];
+      if (!Array.isArray(x) || x.length === 0) return;
+
+      this.clearImageInput(this.el.get(`#${toId}`));
+
+      const i = this.#getGallerySelectedIndex(this.el.get(`#${fromId}`));
+
+      return [x[i].replace('data:;','data:image/png;')];
+    }
+    async copyImageFromGalleryToClipboard ({ x, fromId }) {
+      x = x[0];
+      if (!Array.isArray(x) || x.length === 0) return;
+
+      const i = this.#getGallerySelectedIndex(this.el.get(`#${fromId}`));
+
+      const data = x[i];
+      const blob = await (await fetch(data.replace('data:;','data:image/png;'))).blob();
+      const item = new ClipboardItem({'image/png': blob});
+
+      await this.copyToClipboard([item]);
+    }
+    async copyFullOutput ({ fromId }) {
+      const textField = this.el.get(`#${fromId} .textfield`);
+      if (!textField) {
+        SDClass.error(new Error(`Can't find textfield with the output!`)); return;
+      }
+
+      const value = textField.textContent.replace(/\s+/g,' ').replace(/: /g,':');
+
+      await this.copyToClipboard(value);
+    }
+    clickFirstVisibleButton({ rowId }) {
+      const generateButtons = this.el.get(`#${rowId}`).querySelectorAll('.gr-button-primary');
+
+      if (!generateButtons.length) return;
+
+      for (let i = 0, arr = [...generateButtons]; i < arr.length; i++) {
+        const cs = window.getComputedStyle(arr[i]);
+
+        if (cs.display !== 'none' && cs.visibility !== 'hidden') {
+          console.log(arr[i]);
+
+          arr[i].click();
+          break;
+        }
+      }
+    }
+    async gradioInputToClipboard ({ x }) { return this.copyToClipboard(x[0]); }
+    async copyToClipboard (value) {
+      if (!value || typeof value === 'boolean') return;
+      try {
+        if (Array.isArray(value) &&
+            value.length &&
+            value[0] instanceof ClipboardItem) {
+          await navigator.clipboard.write(value);
+        } else {
+          await navigator.clipboard.writeText(value);
+        }
+      } catch (e) {
+        SDClass.error(e);
+      }
+    }
+    static error (e) {
+      console.error(e);
+      if (typeof e === 'string') {
+        alert(e);
+      } else if (typeof e === 'object' && Object.hasOwn(e, 'message')) {
+        alert(e.message);
+      }
+    }
+    clearImageInput (imageEditor) {
+      imageEditor?.querySelector('.modify-upload button:last-child')?.click();
+    }
+    #getGallerySelectedIndex (gallery) {
+      const selected = gallery.querySelector(`.\\!ring-2`);
+      return selected ?
+        [...selected.parentNode.children].indexOf(selected) : 0;
+    }
+  }
+
+  return new SDClass();
+})();

From a5c941329ed91ef79e46d074dbe60a70b4336778 Mon Sep 17 00:00:00 2001
From: hlky <106811348+hlky@users.noreply.github.com>
Date: Sat, 1 Oct 2022 21:47:26 +0100
Subject: [PATCH 02/15] Update img2txt.py

---
 scripts/img2txt.py | 100 ++++++++++++++++++++++-----------------------
 1 file changed, 50 insertions(+), 50 deletions(-)

diff --git a/scripts/img2txt.py b/scripts/img2txt.py
index 5a399a9..0826058 100644
--- a/scripts/img2txt.py
+++ b/scripts/img2txt.py
@@ -61,7 +61,8 @@ from ldm.models.blip import blip_decoder
 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
 blip_image_eval_size = 512
 #blip_model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_base_caption.pth'
-
+server_state["clip_models"] = {}
+server_state["preprocesses"] = {}
 
 def load_blip_model():
     print("Loading BLIP Model")
@@ -164,61 +165,60 @@ def interrogate(image, models):
     print("Ranking Text")
 
     for model_name in models:
-        print(f"Interrogating with {model_name}...")
-        st.session_state["log_message"].code(f"Interrogating with {model_name}...", language='')
-
-        if "clip_model" not in server_state:
-            #with server_state_lock[server_state["clip_model"]]:
-            if model_name == 'ViT-H-14':
-                server_state["clip_model"], _, server_state["preprocess"] = open_clip.create_model_and_transforms(model_name, pretrained='laion2b_s32b_b79k')
-            elif model_name == 'ViT-g-14':
-                server_state["clip_model"], _, server_state["preprocess"] = open_clip.create_model_and_transforms(model_name, pretrained='laion2b_s12b_b42k')
-            else:
-                server_state["clip_model"], server_state["preprocess"] = clip.load(model_name, device=device)
-
-            server_state["clip_model"] = server_state["clip_model"].cuda().eval()
-
-        images = server_state["preprocess"](image).unsqueeze(0).cuda()
+        with torch.no_grad(), torch.autocast('cuda', dtype=torch.float16):
+            print(f"Interrogating with {model_name}...")
+            st.session_state["log_message"].code(f"Interrogating with {model_name}...", language='')
+
+            if model_name not in server_state["clip_models"]:
+                if model_name == 'ViT-H-14':
+                    server_state["clip_models"][model_name], _, server_state["preprocesses"][model_name] = open_clip.create_model_and_transforms(model_name, pretrained='laion2b_s32b_b79k')
+                elif model_name == 'ViT-g-14':
+                    server_state["clip_models"][model_name], _, server_state["preprocesses"][model_name] = open_clip.create_model_and_transforms(model_name, pretrained='laion2b_s12b_b42k')
+                else:
+                    server_state["clip_models"][model_name], server_state["preprocesses"][model_name] = clip.load(model_name, device=device)
+                server_state["clip_models"][model_name] = server_state["clip_models"][model_name].cuda().eval()
+
+            images = server_state["preprocesses"][model_name](image).unsqueeze(0).cuda()
+
+
+            image_features = server_state["clip_models"][model_name].encode_image(images).float()
 
-        with torch.no_grad():
-            image_features = server_state["clip_model"].encode_image(images).float()
-
-        image_features /= image_features.norm(dim=-1, keepdim=True)
+            image_features /= image_features.norm(dim=-1, keepdim=True)
 
-        if st.session_state["defaults"].general.optimized:
-            clear_cuda()
-
-        ranks = []
-        ranks.append(batch_rank(server_state["clip_model"], image_features, server_state["mediums"]))
-        ranks.append(batch_rank(server_state["clip_model"], image_features, ["by "+artist for artist in server_state["artists"]]))
-        ranks.append(batch_rank(server_state["clip_model"], image_features, server_state["trending_list"]))
server_state["trending_list"])) - ranks.append(batch_rank(server_state["clip_model"], image_features, server_state["movements"])) - ranks.append(batch_rank(server_state["clip_model"], image_features, server_state["flavors"])) - # ranks.append(batch_rank(server_state["clip_model"], image_features, server_state["genres"])) - # ranks.append(batch_rank(server_state["clip_model"], image_features, server_state["styles"])) - # ranks.append(batch_rank(server_state["clip_model"], image_features, server_state["techniques"])) - # ranks.append(batch_rank(server_state["clip_model"], image_features, server_state["subjects"])) - # ranks.append(batch_rank(server_state["clip_model"], image_features, server_state["colors"])) - # ranks.append(batch_rank(server_state["clip_model"], image_features, server_state["moods"])) - # ranks.append(batch_rank(server_state["clip_model"], image_features, server_state["themes"])) - # ranks.append(batch_rank(server_state["clip_model"], image_features, server_state["keywords"])) + if st.session_state["defaults"].general.optimized: + clear_cuda() + + ranks = [] + ranks.append(batch_rank(server_state["clip_models"][model_name], image_features, server_state["mediums"])) + ranks.append(batch_rank(server_state["clip_models"][model_name], image_features, ["by "+artist for artist in server_state["artists"]])) + ranks.append(batch_rank(server_state["clip_models"][model_name], image_features, server_state["trending_list"])) + ranks.append(batch_rank(server_state["clip_models"][model_name], image_features, server_state["movements"])) + ranks.append(batch_rank(server_state["clip_models"][model_name], image_features, server_state["flavors"])) + # ranks.append(batch_rank(server_state["clip_models"][model_name], image_features, server_state["genres"])) + # ranks.append(batch_rank(server_state["clip_models"][model_name], image_features, server_state["styles"])) + # ranks.append(batch_rank(server_state["clip_models"][model_name], image_features, server_state["techniques"])) + # ranks.append(batch_rank(server_state["clip_models"][model_name], image_features, server_state["subjects"])) + # ranks.append(batch_rank(server_state["clip_models"][model_name], image_features, server_state["colors"])) + # ranks.append(batch_rank(server_state["clip_models"][model_name], image_features, server_state["moods"])) + # ranks.append(batch_rank(server_state["clip_models"][model_name], image_features, server_state["themes"])) + # ranks.append(batch_rank(server_state["clip_models"][model_name], image_features, server_state["keywords"])) - for i in range(len(ranks)): - confidence_sum = 0 - for ci in range(len(ranks[i])): - confidence_sum += ranks[i][ci][1] - if confidence_sum > sum(bests[i][t][1] for t in range(len(bests[i]))): - bests[i] = ranks[i] + for i in range(len(ranks)): + confidence_sum = 0 + for ci in range(len(ranks[i])): + confidence_sum += ranks[i][ci][1] + if confidence_sum > sum(bests[i][t][1] for t in range(len(bests[i]))): + bests[i] = ranks[i] - row = [model_name] - for r in ranks: - row.append(', '.join([f"{x[0]} ({x[1]:0.1f}%)" for x in r])) + row = [model_name] + for r in ranks: + row.append(', '.join([f"{x[0]} ({x[1]:0.1f}%)" for x in r])) - table.append(row) + table.append(row) - if st.session_state["defaults"].general.optimized: - del server_state["clip_model"] - gc.collect() + if st.session_state["defaults"].general.optimized: + del server_state["clip_models"][model_name] + gc.collect() # for i in range(len(st.session_state["uploaded_image"])): 
st.session_state["prediction_table"][st.session_state["processed_image_count"]].dataframe(pd.DataFrame( From 2a49c28980aa2ce490ea1925767197053cd721b0 Mon Sep 17 00:00:00 2001 From: hlky <106811348+hlky@users.noreply.github.com> Date: Sat, 1 Oct 2022 22:50:40 +0100 Subject: [PATCH 03/15] docker / local cache paths --- .gitignore | 1 + entrypoint.sh | 62 +++++++++++++++++++++++-------------- scripts/img2txt.py | 6 ++-- scripts/txt2vid.py | 77 ++++++++++++++++++++-------------------------- 4 files changed, 76 insertions(+), 70 deletions(-) diff --git a/.gitignore b/.gitignore index cbbe7b4..c415f1d 100644 --- a/.gitignore +++ b/.gitignore @@ -64,3 +64,4 @@ condaenv.*.requirements.txt /gfpgan/* /models/* z_version_env.tmp +/user_data/* diff --git a/entrypoint.sh b/entrypoint.sh index 40a7868..5f838cf 100755 --- a/entrypoint.sh +++ b/entrypoint.sh @@ -24,7 +24,21 @@ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) cd $SCRIPT_DIR export PYTHONPATH=$SCRIPT_DIR -MODEL_DIR="${SCRIPT_DIR}/model_cache" +if [[ $PUBLIC_KEY ]] +then + mkdir -p ~/.ssh + chmod 700 ~/.ssh + cd ~/.ssh + echo $PUBLIC_KEY >> authorized_keys + chmod 700 -R ~/.ssh + cd / + service ssh start + echo "SSH Service Started" +fi + + +MODEL_DIR="${SCRIPT_DIR}/user_data/model_cache" +mkdir -p $MODEL_DIR # Array of model files to pre-download # local filename # local path in container (no trailing slash) @@ -37,6 +51,17 @@ MODEL_FILES=( 'RealESRGAN_x4plus_anime_6B.pth src/realesrgan/experiments/pretrained_models https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth f872d837d3c90ed2e05227bed711af5671a6fd1c9f7d7e91c911a61f155e99da' 'project.yaml src/latent-diffusion/experiments/pretrained_models https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1 9d6ad53c5dafeb07200fb712db14b813b527edd262bc80ea136777bdb41be2ba' 'model.ckpt src/latent-diffusion/experiments/pretrained_models https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1 c209caecac2f97b4bb8f4d726b70ac2ac9b35904b7fc99801e1f5e61f9210c13' + 'waifu-diffusion.ckpt models/custom https://huggingface.co/crumb/pruned-waifu-diffusion/resolve/main/model-pruned.ckpt 9b31355f90fea9933847175d4731a033f49f861395addc7e153f480551a24c25' + 'trinart.ckpt models/custom https://huggingface.co/naclbit/trinart_stable_diffusion_v2/resolve/main/trinart2_step95000.ckpt c1799d22a355ba25c9ceeb6e3c91fc61788c8e274b73508ae8a15877c5dbcf63' + 'model__base_caption.pth models/blip https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_base_caption.pth 96ac8749bd0a568c274ebe302b3a3748ab9be614c737f3d8c529697139174086' + 'pytorch_model.bin models/clip-vit-large-patch14 https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/pytorch_model.bin f1a17cdbe0f36fec524f5cafb1c261ea3bbbc13e346e0f74fc9eb0460dedd0d3' + 'config.json models/clip-vit-large-patch14 https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/config.json 8a09b467700c58138c29d53c605b34ebc69beaadd13274a8a2af8ad2c2f4032a' + 'merges.txt models/clip-vit-large-patch14 https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/merges.txt 9fd691f7c8039210e0fced15865466c65820d09b63988b0174bfe25de299051a' + 'preprocessor_config.json models/clip-vit-large-patch14 https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/preprocessor_config.json 910e70b3956ac9879ebc90b22fb3bc8a75b6a0677814500101a4c072bd7857bd' + 'special_tokens_map.json models/clip-vit-large-patch14 
+'tokenizer.json models/clip-vit-large-patch14 https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/tokenizer.json a83e0809aa4c3af7208b2df632a7a69668c6d48775b3c3fe4e1b1199d1f8b8f4'
+'tokenizer_config.json models/clip-vit-large-patch14 https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/tokenizer_config.json deef455e52fa5e8151e339add0582e4235f066009601360999d3a9cda83b1129'
+'vocab.json models/clip-vit-large-patch14 https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/vocab.json 3f0c4f7d2086b61b38487075278ea9ed04edb53a03cbb045b86c27190fa8fb69'
 )
 
 
@@ -83,33 +108,24 @@ else
         validateDownloadModel ${model[0]} ${model[1]} ${model[2]} ${model[3]}
     fi
 done
+    ln -s models/clip-vit-large-patch14 user_data/model_cache/stable-diffusion-v1-4/tokenizer
+    ln -s models/clip-vit-large-patch14 user_data/model_cache/waifu-diffusion/tokenizer
 fi
 
-# Determine which webserver interface to launch (Streamlit vs Default: Gradio)
-if [[ ! -z $WEBUI_SCRIPT && $WEBUI_SCRIPT == "webui_streamlit.py" ]]; then
-    launch_command="streamlit run scripts/${WEBUI_SCRIPT:-webui.py} $WEBUI_ARGS"
+cd ${MODEL_DIR}
+if [[ -e "${MODEL_DIR}/sd-concepts-library" ]]; then
+    git clone https://github.com/sd-webui/sd-concepts-library
 else
-    launch_command="python scripts/${WEBUI_SCRIPT:-webui.py} $WEBUI_ARGS"
+    cd sd-concepts-library
+    git pull
+    cd ..
 fi
+ln -s ${MODEL_DIR}/sd-concepts-library/sd-concepts-library models/custom/sd-concepts-library
 
-# Start webserver interface
-launch_message="Starting Stable Diffusion WebUI... ${launch_command}..."
-if [[ -z $WEBUI_RELAUNCH || $WEBUI_RELAUNCH == "true" ]]; then
-    n=0
-    while true; do
-        echo $launch_message
+echo "export TRANSFORMERS_CACHE=${MODEL_DIR}" >> ~/.bashrc
 
-        if (( $n > 0 )); then
-            echo "Relaunch count: ${n}"
-        fi
+launch_command="streamlit run scripts/webui_streamlit.py"
 
-        $launch_command
+$launch_command
 
-        echo "entrypoint.sh: Process is ending. Relaunching in 0.5s..."
-        ((n++))
-        sleep 0.5
-    done
-else
-    echo $launch_message
-    $launch_command
-fi
+sleep infinity
diff --git a/scripts/img2txt.py b/scripts/img2txt.py
index 0826058..9410b2b 100644
--- a/scripts/img2txt.py
+++ b/scripts/img2txt.py
@@ -171,11 +171,11 @@ def interrogate(image, models):
 
             if model_name not in server_state["clip_models"]:
                 if model_name == 'ViT-H-14':
-                    server_state["clip_models"][model_name], _, server_state["preprocesses"][model_name] = open_clip.create_model_and_transforms(model_name, pretrained='laion2b_s32b_b79k')
+                    server_state["clip_models"][model_name], _, server_state["preprocesses"][model_name] = open_clip.create_model_and_transforms(model_name, pretrained='laion2b_s32b_b79k', cache_dir='user_data/model_cache/clip')
                 elif model_name == 'ViT-g-14':
-                    server_state["clip_models"][model_name], _, server_state["preprocesses"][model_name] = open_clip.create_model_and_transforms(model_name, pretrained='laion2b_s12b_b42k')
+                    server_state["clip_models"][model_name], _, server_state["preprocesses"][model_name] = open_clip.create_model_and_transforms(model_name, pretrained='laion2b_s12b_b42k', cache_dir='user_data/model_cache/clip')
                 else:
-                    server_state["clip_models"][model_name], server_state["preprocesses"][model_name] = clip.load(model_name, device=device)
+                    server_state["clip_models"][model_name], server_state["preprocesses"][model_name] = clip.load(model_name, device=device, download_root='user_data/model_cache/clip')
                 server_state["clip_models"][model_name] = server_state["clip_models"][model_name].cuda().eval()
 
             images = server_state["preprocesses"][model_name](image).unsqueeze(0).cuda()
diff --git a/scripts/txt2vid.py b/scripts/txt2vid.py
index 4f3bdc9..bc4f451 100644
--- a/scripts/txt2vid.py
+++ b/scripts/txt2vid.py
@@ -230,58 +230,47 @@ def load_diffusers_model(weights_path,torch_device):
 
     try:
         with server_state_lock["pipe"]:
-            try:
-                if not "pipe" in st.session_state or st.session_state["weights_path"] != weights_path:
-                    if st.session_state["weights_path"] != weights_path:
-                        del st.session_state["weights_path"]
-
-                    st.session_state["weights_path"] = weights_path
-                    server_state["pipe"] = StableDiffusionPipeline.from_pretrained(
-                        weights_path,
-                        use_local_file=True,
-                        use_auth_token=st.session_state["defaults"].general.huggingface_token,
-                        torch_dtype=torch.float16 if st.session_state['defaults'].general.use_float16 else None,
-                        revision="fp16" if not st.session_state['defaults'].general.no_half else None
-                    )
-
-                    server_state["pipe"].unet.to(torch_device)
-                    server_state["pipe"].vae.to(torch_device)
-                    server_state["pipe"].text_encoder.to(torch_device)
-
-                    if st.session_state.defaults.general.enable_attention_slicing:
-                        server_state["pipe"].enable_attention_slicing()
-
-                    if st.session_state.defaults.general.enable_minimal_memory_usage:
-                        server_state["pipe"].enable_minimal_memory_usage()
-
-                    print("Tx2Vid Model Loaded")
-                else:
-                    print("Tx2Vid Model already Loaded")
+            if not "pipe" in st.session_state or st.session_state["weights_path"] != weights_path:
+                if st.session_state["weights_path"] != weights_path:
+                    del st.session_state["weights_path"]
 
-            except:
-                #del st.session_state["weights_path"]
-                #del server_state["pipe"]
-
-                st.session_state["weights_path"] = weights_path
-                server_state["pipe"] = StableDiffusionPipeline.from_pretrained(
-                    weights_path,
-                    use_local_file=True,
-                    use_auth_token=st.session_state["defaults"].general.huggingface_token,
-                    torch_dtype=torch.float16 if st.session_state['defaults'].general.use_float16 else None,
-                    revision="fp16" if not st.session_state['defaults'].general.no_half else None
-                )
-
+                st.session_state["weights_path"] = weights_path
+                # if folder "user_data/model_cache/stable-diffusion-v1-4" exists, load the model from there
+                if weights_path == "CompVisLab/stable-diffusion-v1-4":
+                    model_path = os.path.join("user_data", "model_cache", "stable-diffusion-v1-4")
+                elif weights_path == "hakurei/waifu-diffusion":
+                    model_path = os.path.join("user_data", "model_cache", "waifu-diffusion")
+
+                if not os.path.exists(model_path):
+                    server_state["pipe"] = StableDiffusionPipeline.from_pretrained(
+                        weights_path,
+                        use_local_file=True,
+                        use_auth_token=st.session_state["defaults"].general.huggingface_token,
+                        torch_dtype=torch.float16 if st.session_state['defaults'].general.use_float16 else None,
+                        revision="fp16" if not st.session_state['defaults'].general.no_half else None
+                    )
+                    StableDiffusionPipeline.save_pretrained(server_state["pipe"], model_path)
+                else:
+                    server_state["pipe"] = StableDiffusionPipeline.from_pretrained(
+                        model_path,
+                        use_local_file=True,
+                        torch_dtype=torch.float16 if st.session_state['defaults'].general.use_float16 else None,
+                        revision="fp16" if not st.session_state['defaults'].general.no_half else None
+                    )
+
+                server_state["pipe"].unet.to(torch_device)
+                server_state["pipe"].vae.to(torch_device)
+                server_state["pipe"].text_encoder.to(torch_device)
+
+                if st.session_state.defaults.general.enable_attention_slicing:
+                    server_state["pipe"].enable_attention_slicing()
+
+                if st.session_state.defaults.general.enable_minimal_memory_usage:
+                    server_state["pipe"].enable_minimal_memory_usage()
+
+                print("Tx2Vid Model Loaded")
+            else:
+                print("Tx2Vid Model already Loaded")
 
     except (EnvironmentError, OSError):
         st.session_state["progress_bar_text"].error(
             "You need a huggingface token in order to use the Text to Video tab. Use the Settings page from the sidebar on the left to add your token."

From 7c74a5ad69237aa68fe1c993ff476066332daeb5 Mon Sep 17 00:00:00 2001
From: hlky <106811348+hlky@users.noreply.github.com>
Date: Sat, 1 Oct 2022 23:02:41 +0100
Subject: [PATCH 04/15] Update entrypoint.sh

---
 entrypoint.sh | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/entrypoint.sh b/entrypoint.sh
index 5f838cf..7ebc799 100755
--- a/entrypoint.sh
+++ b/entrypoint.sh
@@ -108,23 +108,27 @@ else
         validateDownloadModel ${model[0]} ${model[1]} ${model[2]} ${model[3]}
     fi
 done
-    ln -s models/clip-vit-large-patch14 user_data/model_cache/stable-diffusion-v1-4/tokenizer
-    ln -s models/clip-vit-large-patch14 user_data/model_cache/waifu-diffusion/tokenizer
+    mkdir -p ${MODEL_DIR}/stable-diffusion-v1-4/tokenizer
+    mkdir -p ${MODEL_DIR}/waifu-diffusion/tokenizer
+
+    ln -fs ${SCRIPT_DIR}/models/clip-vit-large-patch14 ${MODEL_DIR}/stable-diffusion-v1-4/tokenizer
+    ln -fs ${SCRIPT_DIR}/models/clip-vit-large-patch14 ${MODEL_DIR}/waifu-diffusion/tokenizer
 fi
 
 cd ${MODEL_DIR}
 if [[ -e "${MODEL_DIR}/sd-concepts-library" ]]; then
-    git clone https://github.com/sd-webui/sd-concepts-library
-else
-    cd sd-concepts-library
+    cd ${MODEL_DIR}/sd-concepts-library
     git pull
     cd ..
+else
+    git clone https://github.com/sd-webui/sd-concepts-library
 fi
-ln -s ${MODEL_DIR}/sd-concepts-library/sd-concepts-library models/custom/sd-concepts-library
+mkdir -p ${SCRIPT_DIR}/models/custom/sd-concepts-library
+ln -fs ${MODEL_DIR}/sd-concepts-library/sd-concepts-library ${SCRIPT_DIR}/models/custom/sd-concepts-library
 
 echo "export TRANSFORMERS_CACHE=${MODEL_DIR}" >> ~/.bashrc
 
-launch_command="streamlit run scripts/webui_streamlit.py"
+launch_command="streamlit run ${SCRIPT_DIR}/scripts/webui_streamlit.py"
 
 $launch_command

From c1db30d41d03c17b087374f2a14416904e26a3ce Mon Sep 17 00:00:00 2001
From: hlky <106811348+hlky@users.noreply.github.com>
Date: Sat, 1 Oct 2022 23:36:09 +0100
Subject: [PATCH 05/15] Update entrypoint.sh

---
 entrypoint.sh | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/entrypoint.sh b/entrypoint.sh
index 7ebc799..a99da27 100755
--- a/entrypoint.sh
+++ b/entrypoint.sh
@@ -115,11 +115,10 @@ else
     ln -fs ${SCRIPT_DIR}/models/clip-vit-large-patch14 ${MODEL_DIR}/waifu-diffusion/tokenizer
 fi
 
-cd ${MODEL_DIR}
 if [[ -e "${MODEL_DIR}/sd-concepts-library" ]]; then
     cd ${MODEL_DIR}/sd-concepts-library
     git pull
-    cd ..
+    cd $SCRIPT_DIR
 else
     git clone https://github.com/sd-webui/sd-concepts-library
 fi

From 0fcab436cfab3ec968445c5d25d6e7932681c1c1 Mon Sep 17 00:00:00 2001
From: hlky <106811348+hlky@users.noreply.github.com>
Date: Sat, 1 Oct 2022 23:49:40 +0100
Subject: [PATCH 06/15] Update entrypoint.sh

---
 entrypoint.sh | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/entrypoint.sh b/entrypoint.sh
index a99da27..17c9883 100755
--- a/entrypoint.sh
+++ b/entrypoint.sh
@@ -108,11 +108,11 @@ else
         validateDownloadModel ${model[0]} ${model[1]} ${model[2]} ${model[3]}
     fi
 done
-    mkdir -p ${MODEL_DIR}/stable-diffusion-v1-4/tokenizer
-    mkdir -p ${MODEL_DIR}/waifu-diffusion/tokenizer
+    mkdir -p ${MODEL_DIR}/stable-diffusion-v1-4
+    mkdir -p ${MODEL_DIR}/waifu-diffusion
 
-    ln -fs ${SCRIPT_DIR}/models/clip-vit-large-patch14 ${MODEL_DIR}/stable-diffusion-v1-4/tokenizer
-    ln -fs ${SCRIPT_DIR}/models/clip-vit-large-patch14 ${MODEL_DIR}/waifu-diffusion/tokenizer
+    ln -fs ${SCRIPT_DIR}/models/clip-vit-large-patch14/ ${MODEL_DIR}/stable-diffusion-v1-4/tokenizer
+    ln -fs ${SCRIPT_DIR}/models/clip-vit-large-patch14/ ${MODEL_DIR}/waifu-diffusion/tokenizer
 fi
 
 if [[ -e "${MODEL_DIR}/sd-concepts-library" ]]; then
@@ -122,10 +122,13 @@ if [[ -e "${MODEL_DIR}/sd-concepts-library" ]]; then
 else
     git clone https://github.com/sd-webui/sd-concepts-library
 fi
-mkdir -p ${SCRIPT_DIR}/models/custom/sd-concepts-library
-ln -fs ${MODEL_DIR}/sd-concepts-library/sd-concepts-library ${SCRIPT_DIR}/models/custom/sd-concepts-library
+mkdir -p ${SCRIPT_DIR}/models/custom
+ln -fs ${MODEL_DIR}/sd-concepts-library/sd-concepts-library ${SCRIPT_DIR}/models/custom
 
+echo "export HF_HOME=${MODEL_DIR}" >> ~/.bashrc
+echo "export XDG_CACHE_HOME=${MODEL_DIR}" >> ~/.bashrc
 echo "export TRANSFORMERS_CACHE=${MODEL_DIR}" >> ~/.bashrc
+source ~/.bashrc
 
 launch_command="streamlit run ${SCRIPT_DIR}/scripts/webui_streamlit.py"

From 47e340dc2c0492c388c8a7c3a332e08fdbfeb8f5 Mon Sep 17 00:00:00 2001
From: hlky <106811348+hlky@users.noreply.github.com>
Date: Sat, 1 Oct 2022 23:59:50 +0100
Subject: [PATCH 07/15] Update txt2vid.py

---
 scripts/txt2vid.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/scripts/txt2vid.py b/scripts/txt2vid.py
index bc4f451..165dc63 100644
--- a/scripts/txt2vid.py
+++ b/scripts/txt2vid.py
@@ -231,12 +231,12 @@ def load_diffusers_model(weights_path,torch_device):
     try:
         with server_state_lock["pipe"]:
             if not "pipe" in st.session_state or st.session_state["weights_path"] != weights_path:
-                if st.session_state["weights_path"] != weights_path:
+                if ("weights_path" in st.session_state) and st.session_state["weights_path"] != weights_path:
                     del st.session_state["weights_path"]
 
                 st.session_state["weights_path"] = weights_path
                 # if folder "user_data/model_cache/stable-diffusion-v1-4" exists, load the model from there
-                if weights_path == "CompVisLab/stable-diffusion-v1-4":
+                if weights_path == "CompVis/stable-diffusion-v1-4":
                     model_path = os.path.join("user_data", "model_cache", "stable-diffusion-v1-4")
                 elif weights_path == "hakurei/waifu-diffusion":
                     model_path = os.path.join("user_data", "model_cache", "waifu-diffusion")

From d017fe7af6e41bd5bbedb75cec63014a3c5c7c55 Mon Sep 17 00:00:00 2001
From: hlky <106811348+hlky@users.noreply.github.com>
Date: Sun, 2 Oct 2022 00:04:08 +0100
Subject: [PATCH 08/15] Update modules.py

---
 ldm/modules/encoders/modules.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/ldm/modules/encoders/modules.py b/ldm/modules/encoders/modules.py
index ededbe4..5d6124d 100644
--- a/ldm/modules/encoders/modules.py
+++ b/ldm/modules/encoders/modules.py
@@ -5,7 +5,7 @@ import clip
 from einops import rearrange, repeat
 from transformers import CLIPTokenizer, CLIPTextModel
 import kornia
-
+import os
 from ldm.modules.x_transformer import Encoder, TransformerWrapper  # TODO: can we directly rely on lucidrains code and simply add this as a reuirement? --> test
 
 
@@ -138,8 +138,12 @@ class FrozenCLIPEmbedder(AbstractEncoder):
     """Uses the CLIP transformer encoder for text (from Hugging Face)"""
     def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77):
         super().__init__()
-        self.tokenizer = CLIPTokenizer.from_pretrained(version)
-        self.transformer = CLIPTextModel.from_pretrained(version)
+        if os.path.exists("models/clip-vit-large-patch14"):
+            self.tokenizer = CLIPTokenizer.from_pretrained("models/clip-vit-large-patch14")
+            self.transformer = CLIPTextModel.from_pretrained("models/clip-vit-large-patch14")
+        else:
+            self.tokenizer = CLIPTokenizer.from_pretrained(version)
+            self.transformer = CLIPTextModel.from_pretrained(version)
         self.device = device
         self.max_length = max_length
         self.freeze()

From 4b6de58ae46aa5b122b8f6bf92b61be5226e44bd Mon Sep 17 00:00:00 2001
From: hlky <106811348+hlky@users.noreply.github.com>
Date: Sun, 2 Oct 2022 00:27:58 +0100
Subject: [PATCH 09/15] Update txt2vid.py

---
 scripts/txt2vid.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/txt2vid.py b/scripts/txt2vid.py
index 165dc63..e8013e6 100644
--- a/scripts/txt2vid.py
+++ b/scripts/txt2vid.py
@@ -241,7 +241,7 @@ def load_diffusers_model(weights_path,torch_device):
                 elif weights_path == "hakurei/waifu-diffusion":
                     model_path = os.path.join("user_data", "model_cache", "waifu-diffusion")
 
-                if not os.path.exists(model_path):
+                if not os.path.exists(model_path + "config.json"):
                     server_state["pipe"] = StableDiffusionPipeline.from_pretrained(
                         weights_path,
                         use_local_file=True,

From dd461037a6041e8d0b830e1f070b4b7c17ae6410 Mon Sep 17 00:00:00 2001
From: hlky <106811348+hlky@users.noreply.github.com>
Date: Sun, 2 Oct 2022 00:28:01 +0100
Subject: [PATCH 10/15] Update webui_streamlit.yaml

---
 configs/webui/webui_streamlit.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/configs/webui/webui_streamlit.yaml b/configs/webui/webui_streamlit.yaml
index 63dfbe6..71ec419 100644
--- a/configs/webui/webui_streamlit.yaml
+++ b/configs/webui/webui_streamlit.yaml
@@ -293,7 +293,7 @@ img2img:
     write_info_files: True
 
 img2txt:
-    batch_size: 100
+    batch_size: 420
     blip_image_eval_size: 512
 concepts_library:
     concepts_per_page: 12

From babf6c4fc07d07e14d9799556bedee202146a3f9 Mon Sep 17 00:00:00 2001
From: hlky <106811348+hlky@users.noreply.github.com>
Date: Sun, 2 Oct 2022 00:46:51 +0100
Subject: [PATCH 11/15] Update txt2vid.py

---
 scripts/txt2vid.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/txt2vid.py b/scripts/txt2vid.py
index e8013e6..685bbf7 100644
--- a/scripts/txt2vid.py
+++ b/scripts/txt2vid.py
@@ -241,7 +241,7 @@ def load_diffusers_model(weights_path,torch_device):
                 elif weights_path == "hakurei/waifu-diffusion":
                     model_path = os.path.join("user_data", "model_cache", "waifu-diffusion")
 
-                if not os.path.exists(model_path + "config.json"):
+                if not os.path.exists(model_path + "/config.json"):
                     server_state["pipe"] = StableDiffusionPipeline.from_pretrained(
                         weights_path,
                         use_local_file=True,

From a3f94d349115bd225da1dfa3e4069d67205a034f Mon Sep 17 00:00:00 2001
From: hlky <106811348+hlky@users.noreply.github.com>
Date: Sun, 2 Oct 2022 00:52:53 +0100
Subject: [PATCH 12/15] Update txt2vid.py

---
 scripts/txt2vid.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/txt2vid.py b/scripts/txt2vid.py
index 685bbf7..905d999 100644
--- a/scripts/txt2vid.py
+++ b/scripts/txt2vid.py
@@ -241,7 +241,7 @@ def load_diffusers_model(weights_path,torch_device):
                 elif weights_path == "hakurei/waifu-diffusion":
                     model_path = os.path.join("user_data", "model_cache", "waifu-diffusion")
 
-                if not os.path.exists(model_path + "/config.json"):
+                if not os.path.exists(model_path + "/model_index.json"):
                     server_state["pipe"] = StableDiffusionPipeline.from_pretrained(
                         weights_path,
                         use_local_file=True,

From 814cf8597c2494de305e4e6f438a4338936723bb Mon Sep 17 00:00:00 2001
From: hlky <106811348+hlky@users.noreply.github.com>
Date: Sun, 2 Oct 2022 01:39:09 +0100
Subject: [PATCH 13/15] Update entrypoint.sh

---
 entrypoint.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/entrypoint.sh b/entrypoint.sh
index 17c9883..046119e 100755
--- a/entrypoint.sh
+++ b/entrypoint.sh
@@ -129,7 +129,7 @@ echo "export HF_HOME=${MODEL_DIR}" >> ~/.bashrc
 echo "export XDG_CACHE_HOME=${MODEL_DIR}" >> ~/.bashrc
 echo "export TRANSFORMERS_CACHE=${MODEL_DIR}" >> ~/.bashrc
 source ~/.bashrc
-
+cd $SCRIPT_DIR
 launch_command="streamlit run ${SCRIPT_DIR}/scripts/webui_streamlit.py"
 
 $launch_command

From 511730de88db151b3bba6eee5ea0d52b3559b8eb Mon Sep 17 00:00:00 2001
From: hlky <106811348+hlky@users.noreply.github.com>
Date: Sun, 2 Oct 2022 02:03:17 +0100
Subject: [PATCH 14/15] Update entrypoint.sh

---
 entrypoint.sh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/entrypoint.sh b/entrypoint.sh
index 046119e..952a6df 100755
--- a/entrypoint.sh
+++ b/entrypoint.sh
@@ -120,6 +120,7 @@ if [[ -e "${MODEL_DIR}/sd-concepts-library" ]]; then
     git pull
     cd $SCRIPT_DIR
 else
+    cd ${MODEL_DIR}
    git clone https://github.com/sd-webui/sd-concepts-library
 fi
 mkdir -p ${SCRIPT_DIR}/models/custom

From 59f4826e3e93599dcbf0ef70bd34241c2caddf92 Mon Sep 17 00:00:00 2001
From: hlky <106811348+hlky@users.noreply.github.com>
Date: Sun, 2 Oct 2022 02:04:31 +0100
Subject: [PATCH 15/15] Update entrypoint.sh

---
 entrypoint.sh | 1 -
 1 file changed, 1 deletion(-)

diff --git a/entrypoint.sh b/entrypoint.sh
index 952a6df..c1d9043 100755
--- a/entrypoint.sh
+++ b/entrypoint.sh
@@ -118,7 +118,6 @@ fi
 
 if [[ -e "${MODEL_DIR}/sd-concepts-library" ]]; then
     cd ${MODEL_DIR}/sd-concepts-library
     git pull
-    cd $SCRIPT_DIR
 else
     cd ${MODEL_DIR}
     git clone https://github.com/sd-webui/sd-concepts-library