The Merge (#1652)

.github/workflows/deploy.yml (vendored, new file)
@@ -0,0 +1,40 @@
name: Deploy to GitHub Pages

on:
  push:
    branches:
      - master
    # Review gh actions docs if you want to further define triggers, paths, etc
    # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#on

jobs:
  deploy:
    name: Deploy to GitHub Pages
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-node@v3
        with:
          node-version: 18
          cache: yarn

      - name: Install dependencies
        run: yarn install
      - name: Build website
        run: yarn build

      # Popular action to deploy to GitHub Pages:
      # Docs: https://github.com/peaceiris/actions-gh-pages#%EF%B8%8F-docusaurus
      - name: Deploy to GitHub Pages
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          # Build output to publish to the `gh-pages` branch:
          publish_dir: ./build
          # The following lines assign commit authorship to the official
          # GH-Actions bot for deploys to `gh-pages` branch:
          # https://github.com/actions/checkout/issues/13#issuecomment-724415212
          # The GH actions bot is used by default if you didn't specify the two fields.
          # You can swap them out with your own user credentials.
          user_name: github-actions[bot]
          user_email: 41898282+github-actions[bot]@users.noreply.github.com

.github/workflows/test-deploy.yml (vendored, new file)
@@ -0,0 +1,24 @@
name: Test deployment

on:
  pull_request:
    branches:
      - master
    # Review gh actions docs if you want to further define triggers, paths, etc
    # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#on

jobs:
  test-deploy:
    name: Test deployment
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-node@v3
        with:
          node-version: 18
          cache: yarn

      - name: Install dependencies
        run: yarn install
      - name: Test build website
        run: yarn build

.gitignore (vendored)
@@ -55,6 +55,8 @@ condaenv.*.requirements.txt
# =========================================================================== #
/configs/webui/userconfig_streamlit.yaml
/custom-conda-path.txt
!/src/components/*
!/src/pages/*
/src/*
/outputs
/model_cache
@@ -65,4 +67,25 @@ condaenv.*.requirements.txt
/models/*
z_version_env.tmp
scripts/bridgeData.py
/user_data/*
/user_data/*

# Dependencies
/node_modules

# Production
/build

# Generated files
.docusaurus
.cache-loader

# Misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local

npm-debug.log*
yarn-debug.log*
yarn-error.log*
@@ -129,7 +129,7 @@
"\n",
"## Streamlit\n",
"\n",
"![](images/streamlit/streamlit-t2i.png)\n",
"![](https://github.com/aedhcarrick/sygil-webui/blob/patch-2/images/streamlit/streamlit-t2i.png?raw=1)\n",
"\n",
"**Features:**\n",
"\n",
@@ -148,7 +148,7 @@
"\n",
"## Gradio\n",
"\n",
"![](images/gradio/gradio-t2i.png)\n",
"![](https://github.com/aedhcarrick/sygil-webui/blob/patch-2/images/gradio/gradio-t2i.png?raw=1)\n",
"\n",
"**Features:**\n",
"\n",
@@ -166,7 +166,7 @@
"\n",
"### GFPGAN\n",
"\n",
"![](images/GFPGAN.png)\n",
"![](https://github.com/aedhcarrick/sygil-webui/blob/patch-2/images/GFPGAN.png?raw=1)\n",
"\n",
"Lets you improve faces in pictures using the GFPGAN model. There is a checkbox in every tab to use GFPGAN at 100%, and also a separate tab that just allows you to use GFPGAN on any picture, with a slider that controls how strong the effect is.\n",
"\n",
@@ -176,7 +176,7 @@
"\n",
"### RealESRGAN\n",
"\n",
"![](images/RealESRGAN.png)\n",
"![](https://github.com/aedhcarrick/sygil-webui/blob/patch-2/images/RealESRGAN.png?raw=1)\n",
"\n",
"Lets you double the resolution of generated images. There is a checkbox in every tab to use RealESRGAN, and you can choose between the regular upscaler and the anime version.\n",
"There is also a separate tab for using RealESRGAN on any picture.\n",
@@ -265,7 +265,62 @@
{
"cell_type": "markdown",
"source": [
"# Setup"
"# Config options for Colab instance\n",
"> Before running, make sure GPU backend is enabled. (Unless you plan on generating with Stable Horde)\n",
">> Runtime -> Change runtime type -> Hardware Accelerator -> GPU (Make sure to save)"
],
"metadata": {
"id": "iegma7yteERV"
}
},
{
"cell_type": "code",
"source": [
"#@title { display-mode: \"form\" }\n",
"#@markdown WebUI repo (and branch)\n",
"repo_name = \"Sygil-Dev/sygil-webui\" #@param {type:\"string\"}\n",
"repo_branch = \"dev\" #@param {type:\"string\"}\n",
"\n",
"#@markdown Mount Google Drive\n",
"mount_google_drive = True #@param {type:\"boolean\"}\n",
"save_outputs_to_drive = True #@param {type:\"boolean\"}\n",
"#@markdown Folder in Google Drive to search for custom models\n",
"MODEL_DIR = \"sygil-webui/models\" #@param {type:\"string\"}\n",
"\n",
"#@markdown Folder in Google Drive to look for custom config file (streamlit.yaml)\n",
"CONFIG_DIR = \"sygil-webui\" #@param {type:\"string\"}\n",
"\n",
"#@markdown Enter auth token from Huggingface.co\n",
"#@markdown >(required for downloading stable diffusion model.)\n",
"HF_TOKEN = \"\" #@param {type:\"string\"}\n",
"\n",
"#@markdown Select which models to prefetch\n",
"STABLE_DIFFUSION = True #@param {type:\"boolean\"}\n",
"WAIFU_DIFFUSION = False #@param {type:\"boolean\"}\n",
"TRINART_SD = False #@param {type:\"boolean\"}\n",
"SD_WD_LD_TRINART_MERGED = False #@param {type:\"boolean\"}\n",
"GFPGAN = True #@param {type:\"boolean\"}\n",
"REALESRGAN = True #@param {type:\"boolean\"}\n",
"LDSR = True #@param {type:\"boolean\"}\n",
"BLIP_MODEL = False #@param {type:\"boolean\"}\n",
"\n",
"#@markdown Save models to Google Drive for faster loading in future (Be warned! Make sure you have enough space!)\n",
"SAVE_MODELS = False #@param {type:\"boolean\"}"
],
"metadata": {
"id": "OXn96M9deVtF"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# Setup\n",
"\n",
">Runtime will crash when installing conda. This is normal as we are forcing a restart of the runtime from code.\n",
"\n",
">Just hit \"Run All\" again. 😑"
],
"metadata": {
"id": "IZjJSr-WPNxB"
@@ -277,6 +332,7 @@
"id": "eq0-E5mjSpmP"
},
"source": [
"#@title Make sure we have access to GPU backend\n",
"!nvidia-smi -L"
],
"execution_count": null,
@@ -285,14 +341,14 @@
{
"cell_type": "code",
"source": [
"#@title Install miniConda (mamba)\n",
"!pip install condacolab\n",
"import condacolab\n",
"condacolab.install_from_url(\"https://github.com/conda-forge/miniforge/releases/download/4.14.0-0/Mambaforge-4.14.0-0-Linux-x86_64.sh\")\n",
"\n",
"import condacolab\n",
"condacolab.check()\n",
"\n",
"# The runtime will crash after this, its normal as we are forcing a restart of the runtime from code. Just hit \"Run All\" again."
"# The runtime will crash here!!! Don't panic! We planned for this remember?"
],
"metadata": {
"id": "cDu33xkdJ5mD"
@@ -303,11 +359,14 @@
{
"cell_type": "code",
"source": [
"!git clone https://github.com/Sygil-Dev/sygil-webui.git\n",
"%cd /content/sygil-webui/\n",
"!git checkout dev\n",
"!git pull\n",
"!wget -O arial.ttf https://github.com/matomo-org/travis-scripts/blob/master/fonts/Arial.ttf?raw=true"
"#@title Clone webUI repo and download font\n",
"import os\n",
"REPO_URL = os.path.join('https://github.com', repo_name)\n",
"PATH_TO_REPO = os.path.join('/content', repo_name.split('/')[1])\n",
"!git clone {REPO_URL}\n",
"%cd {PATH_TO_REPO}\n",
"!git checkout {repo_branch}\n",
"!git pull"
],
"metadata": {
"id": "pZHGf03Vp305"
@@ -318,7 +377,10 @@
{
"cell_type": "code",
"source": [
"!mamba install cudatoolkit=11.3 git numpy=1.22.3 pip=20.3 python=3.8.5 pytorch=1.11.0 scikit-image=0.19.2 torchvision=0.12.0 -y"
"#@title Install dependencies\n",
"!mamba install cudatoolkit=11.3 git numpy=1.22.3 pip=20.3 python=3.8.5 pytorch=1.11.0 scikit-image=0.19.2 torchvision=0.12.0 -y\n",
"!python --version\n",
"!pip install -r requirements.txt"
],
"metadata": {
"id": "dmN2igp5Yk3z"
@@ -329,52 +391,29 @@
{
"cell_type": "code",
"source": [
"#@title Install dependencies.\n",
"!python --version\n",
"!pip install -r requirements.txt"
],
"metadata": {
"id": "vXX0OaR8KyLQ"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"#@title Install localtunnel to openGoogle's ports\n",
"!npm install localtunnel"
],
"metadata": {
"id": "FHyVuT5aSM2G"
"id": "Nxaxfgo_F8Am"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"#Launch the WebUI"
],
"metadata": {
"id": "csi6cj6gQZmC"
}
},
{
"cell_type": "code",
"source": [
"#@title Mount Google Drive\n",
"import os\n",
"mount_google_drive = True #@param {type:\"boolean\"}\n",
"save_outputs_to_drive = True #@param {type:\"boolean\"}\n",
"\n",
"#@title Mount Google Drive (if selected)\n",
"if mount_google_drive:\n",
" # Mount google drive to store your outputs.\n",
" # Mount google drive to store outputs.\n",
" from google.colab import drive\n",
" drive.mount('/content/drive/', force_remount=True)\n",
"\n",
"if save_outputs_to_drive:\n",
" os.makedirs(\"/content/drive/MyDrive/sygil-webui/outputs\", exist_ok=True)\n",
" os.symlink(\"/content/drive/MyDrive/sygil-webui/outputs\", \"/content/sygil-webui/outputs\", target_is_directory=True)\n"
" # Make symlink to redirect downloads\n",
" OUTPUT_PATH = os.path.join('/content/drive/MyDrive', repo_name.split('/')[1], 'outputs')\n",
" os.makedirs(OUTPUT_PATH, exist_ok=True)\n",
" os.symlink(OUTPUT_PATH, os.path.join(PATH_TO_REPO, 'outputs'), target_is_directory=True)\n"
],
"metadata": {
"id": "pcSWo9Zkzbsf"
@@ -385,20 +424,140 @@
{
"cell_type": "code",
"source": [
"#@title Enter Huggingface token\n",
"!git config --global credential.helper store\n",
"!huggingface-cli login"
"#@title Pre-fetch models\n",
"%cd {PATH_TO_REPO}\n",
"# make list of models we want to download\n",
"model_list = {\n",
" 'stable_diffusion': f'{STABLE_DIFFUSION}',\n",
" 'waifu_diffusion': f'{WAIFU_DIFFUSION}',\n",
" 'trinart_stable_diffusion': f'{TRINART_SD}',\n",
" 'sd_wd_ld_trinart_merged': f'{SD_WD_LD_TRINART_MERGED}',\n",
" 'gfpgan': f'{GFPGAN}',\n",
" 'realesrgan': f'{REALESRGAN}',\n",
" 'ldsr': f'{LDSR}',\n",
" 'blip_model': f'{BLIP_MODEL}'}\n",
"download_list = {k for (k,v) in model_list.items() if v == 'True'}\n",
"\n",
"# get model info (file name, download link, save location)\n",
"import yaml\n",
"from pprint import pprint\n",
"with open('configs/webui/webui_streamlit.yaml') as f:\n",
" dataMap = yaml.safe_load(f)\n",
"models = dataMap['model_manager']['models']\n",
"existing_models = []\n",
"\n",
"# copy script from model manager\n",
"import requests, time, shutil\n",
"from requests.auth import HTTPBasicAuth\n",
"\n",
"if MODEL_DIR != \"\":\n",
" MODEL_DIR = os.path.join('/content/drive/MyDrive', MODEL_DIR)\n",
"else:\n",
" MODEL_DIR = '/content/drive/MyDrive'\n",
"\n",
"def download_file(file_name, file_path, file_url):\n",
" os.makedirs(file_path, exist_ok=True)\n",
" link_path = os.path.join(MODEL_DIR, file_name)\n",
" full_path = os.path.join(file_path, file_name)\n",
" if os.path.exists(link_path):\n",
" print( file_name + \" found in Google Drive\")\n",
" if not os.path.exists(full_path):\n",
" print( \" creating symlink...\")\n",
" os.symlink(link_path, full_path)\n",
" else:\n",
" print( \" symlink already exists\")\n",
" elif not os.path.exists(full_path):\n",
" print( \"Downloading \" + file_name + \"...\", end=\"\" )\n",
" token = None\n",
" if \"huggingface.co\" in file_url:\n",
" token = HTTPBasicAuth('token', HF_TOKEN)\n",
" try:\n",
" with requests.get(file_url, auth = token, stream=True) as r:\n",
" starttime = time.time()\n",
" r.raise_for_status()\n",
" with open(full_path, 'wb') as f:\n",
" for chunk in r.iter_content(chunk_size=8192):\n",
" f.write(chunk)\n",
" if ((time.time() - starttime) % 60.0) > 2 :\n",
" starttime = time.time()\n",
" print( \".\", end=\"\" )\n",
" print( \"done\" )\n",
" print( \" \" + file_name + \" downloaded to \\'\" + file_path + \"\\'\" )\n",
" if SAVE_MODELS and os.path.exists(MODEL_DIR):\n",
" shutil.copy2(full_path,MODEL_DIR)\n",
" print( \" Saved \" + file_name + \" to \" + MODEL_DIR)\n",
" except:\n",
" print( \"Failed to download \" + file_name + \".\" )\n",
" return\n",
" else:\n",
" print( full_path + \" already exists.\" )\n",
" existing_models.append(file_name)\n",
"\n",
"# download models in list\n",
"for model in download_list:\n",
" model_name = models[model]['model_name']\n",
" file_info = models[model]['files']\n",
" for file in file_info:\n",
" file_name = file_info[file]['file_name']\n",
" file_url = file_info[file]['download_link']\n",
" if 'save_location' in file_info[file]:\n",
" file_path = file_info[file]['save_location']\n",
" else: \n",
" file_path = models[model]['save_location']\n",
" download_file(file_name, file_path, file_url)\n",
"\n",
"# add custom models not in list\n",
"CUSTOM_MODEL_DIR = os.path.join(PATH_TO_REPO, 'models/custom')\n",
"if os.path.exists(MODEL_DIR):\n",
" custom_models = os.listdir(MODEL_DIR)\n",
" custom_models = [m for m in custom_models if os.path.isfile(MODEL_DIR + '/' + m)]\n",
" os.makedirs(CUSTOM_MODEL_DIR, exist_ok=True)\n",
" print( \"Custom model(s) found: \" )\n",
" for m in custom_models:\n",
" if m in existing_models:\n",
" continue\n",
" full_path = os.path.join(CUSTOM_MODEL_DIR, m)\n",
" if not os.path.exists(full_path):\n",
" print( \" \" + m )\n",
" os.symlink(os.path.join(MODEL_DIR , m), full_path)\n",
"\n",
"# get custom config file if it exists\n",
"if CONFIG_DIR != \"\":\n",
" CONFIG_FILE = os.path.join('/content/drive/MyDrive', CONFIG_DIR, 'userconfig_streamlit.yaml')\n",
" config_location = os.path.join(PATH_TO_REPO, 'configs/webui/userconfig_streamlit.yaml')\n",
" if os.path.exists(CONFIG_FILE) and not os.path.exists(config_location):\n",
" os.symlink(CONFIG_DIR, config_location)\n",
"\n",
"\n"
],
"metadata": {
"id": "IsbG7fvIrKwg"
"id": "vMdmh81J70yA"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# Launch the web ui server\n",
"### (optional) JS to prevent idle timeout:\n",
"Press 'F12' OR ('CTRL' + 'SHIFT' + 'I') OR right click on this website -> inspect. Then click on the console tab and paste in the following code.\n",
"```js,\n",
"function ClickConnect(){\n",
"console.log(\"Working\");\n",
"document.querySelector(\"colab-toolbar-button#connect\").click()\n",
"}\n",
"setInterval(ClickConnect,60000)\n",
"```"
],
"metadata": {
"id": "pjIjiCuJysJI"
}
},
{
"cell_type": "code",
"source": [
"#@title <-- Press play on the music player to keep the tab alive (Uses only 13MB of data)\n",
"#@title Press play on the music player to keep the tab alive (Uses only 13MB of data)\n",
"%%html\n",
"<b>Press play on the music player to keep the tab alive, then start your generation below (Uses only 13MB of data)</b><br/>\n",
"<audio src=\"https://henk.tech/colabkobold/silence.m4a\" controls>"
@@ -409,27 +568,10 @@
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"JS to prevent idle timeout:\n",
"\n",
"Press F12 OR CTRL + SHIFT + I OR right click on this website -> inspect. Then click on the console tab and paste in the following code.\n",
"\n",
"function ClickConnect(){\n",
"console.log(\"Working\");\n",
"document.querySelector(\"colab-toolbar-button#connect\").click()\n",
"}\n",
"setInterval(ClickConnect,60000)"
],
"metadata": {
"id": "pjIjiCuJysJI"
}
},
{
"cell_type": "code",
"source": [
"#@title Open port 8501 and start Streamlit server. Open link in 'link.txt' file in file pane on left.\n",
"#@title Run localtunnel and start Streamlit server. ('Ctrl' + 'left click') on link in the 'link.txt' file. (/content/link.txt)\n",
"!npx localtunnel --port 8501 &>/content/link.txt &\n",
"!streamlit run scripts/webui_streamlit.py --theme.base dark --server.headless true 2>&1 | tee -a /content/log.txt"
],
@@ -440,4 +582,4 @@
"outputs": []
}
]
}
}

babel.config.js (new file)
@@ -0,0 +1,3 @@
module.exports = {
  presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
};

blog/authors.yml (new file)
@@ -0,0 +1,17 @@
endi:
  name: Endilie Yacop Sucipto
  title: Maintainer of Docusaurus
  url: https://github.com/endiliey
  image_url: https://github.com/endiliey.png

yangshun:
  name: Yangshun Tay
  title: Front End Engineer @ Facebook
  url: https://github.com/yangshun
  image_url: https://github.com/yangshun.png

slorber:
  name: Sébastien Lorber
  title: Docusaurus maintainer
  url: https://sebastienlorber.com
  image_url: https://github.com/slorber.png
@ -23,6 +23,7 @@ general:
|
||||
streamlit_telemetry: False
|
||||
default_theme: dark
|
||||
huggingface_token: ''
|
||||
stable_horde_api: '0000000000'
|
||||
gpu: 0
|
||||
outdir: outputs
|
||||
default_model: "Stable Diffusion v1.5"
|
||||
@@ -65,6 +66,10 @@ general:
    update_preview: True
    update_preview_frequency: 10

admin:
    hide_server_setting: False
    hide_browser_setting: False

debug:
    enable_hydralit: False

@@ -230,7 +235,7 @@ img2img:
    step: 0.01
    # 0: Keep masked area
    # 1: Regenerate only masked area
    mask_mode: 1
    mask_mode: 0
    noise_mode: "Matched Noise"
    mask_restore: False
    # 0: Just resize
@@ -309,6 +314,10 @@ img2txt:
    blip_image_eval_size: 512
    keep_all_models_loaded: False

post_processing:
    save_original_images: True
    outdir_post_processing: outputs/post_processing

concepts_library:
    concepts_per_page: 12

@@ -316,7 +325,7 @@ gfpgan:
    strength: 100

textual_inversion:
    pretrained_model_name_or_path: "models/diffusers/stable-diffusion-v1-4"
    pretrained_model_name_or_path: "models/diffusers/stable-diffusion-v1-5"
    tokenizer_name: "models/clip-vit-large-patch14"

@@ -331,7 +340,7 @@ model_manager:
    files:
        model_ckpt:
            file_name: "Stable Diffusion v1.5.ckpt"
            download_link: "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt"
            download_link: "https://huggingface.co/ZeroCool94/stable-diffusion-v1-5/resolve/main/Stable%20Diffusion%20v1-5-Pruned-ema%20only.ckpt"

    gfpgan:
        model_name: "GFPGAN"

db.json (deleted file)
@@ -1,467 +0,0 @@
{
  "stable_diffusion": {
    "name": "stable_diffusion",
    "type": "ckpt",
    "description": "Generalist AI image generating model. The baseline for all finetuned models.",
    "version": "1.5",
    "style": "generalist",
    "nsfw": false,
    "download_all": true,
    "requires": [
      "clip-vit-large-patch14"
    ],
    "config": {
      "files": [
        {
          "path": "models/ldm/stable-diffusion-v1/model_1_5.ckpt"
        },
        {
          "path": "configs/stable-diffusion/v1-inference.yaml"
        }
      ],
      "download": [
        {
          "file_name": "model_1_5.ckpt",
          "file_path": "models/ldm/stable-diffusion-v1",
          "file_url": "https://{username}:{password}@huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt",
          "hf_auth": true
        }
      ]
    },
    "available": false
  },
  "stable_diffusion_1.4": {
    "name": "stable_diffusion",
    "type": "ckpt",
    "description": "Generalist AI image generating model. The baseline for all finetuned models.",
    "version": "1.4",
    "style": "generalist",
    "nsfw": false,
    "download_all": true,
    "requires": [
      "clip-vit-large-patch14"
    ],
    "config": {
      "files": [
        {
          "path": "models/ldm/stable-diffusion-v1/model.ckpt",
          "md5sum": "c01059060130b8242849d86e97212c84"
        },
        {
          "path": "configs/stable-diffusion/v1-inference.yaml"
        }
      ],
      "download": [
        {
          "file_name": "model.ckpt",
          "file_path": "models/ldm/stable-diffusion-v1",
          "file_url": "https://www.googleapis.com/storage/v1/b/aai-blog-files/o/sd-v1-4.ckpt?alt=media"
        }
      ],
      "alt_download": [
        {
          "file_name": "model.ckpt",
          "file_path": "models/ldm/stable-diffusion-v1",
          "file_url": "https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt",
          "hf_auth": true
        }
      ]
    },
    "available": false
  },
  "waifu_diffusion": {
    "name": "waifu_diffusion",
    "type": "ckpt",
    "description": "Anime styled generations.",
    "version": "1.3",
    "style": "anime",
    "nsfw": false,
    "download_all": true,
    "requires": [
      "clip-vit-large-patch14"
    ],
    "config": {
      "files": [
        {
          "path": "models/custom/waifu-diffusion.ckpt",
          "md5sum": "a2aa170e3f513b32a3fd8841656e0123"
        },
        {
          "path": "configs/stable-diffusion/v1-inference.yaml"
        }
      ],
      "download": [
        {
          "file_name": "waifu-diffusion.ckpt",
          "file_path": "models/custom",
          "file_url": "https://huggingface.co/hakurei/waifu-diffusion-v1-3/resolve/main/wd-v1-3-full.ckpt"
        }
      ]
    },
    "available": false
  },
  "Furry Epoch": {
    "name": "Furry Epoch",
    "type": "ckpt",
    "description": "Furry styled generations.",
    "version": "4",
    "style": "furry",
    "nsfw": false,
    "download_all": false,
    "requires": [
      "clip-vit-large-patch14"
    ],
    "config": {
      "files": [
        {
          "path": "models/custom/furry-diffusion.ckpt",
          "md5sum": "f8ef45a295ef4966682f6e8fc2c6830d"
        },
        {
          "path": "configs/stable-diffusion/v1-inference.yaml"
        }
      ],
      "download": [
        {
          "file_name": "furry-diffusion.ckpt",
          "file_path": "models/custom",
          "file_url": "https://sexy.canine.wf/file/furry-ckpt/furry_epoch4.ckpt"
        }
      ]
    },
    "available": false
  },
  "Yiffy": {
    "name": "Yiffy",
    "type": "ckpt",
    "description": "Furry styled generations.",
    "version": "18",
    "style": "furry",
    "nsfw": false,
    "download_all": true,
    "requires": [
      "clip-vit-large-patch14"
    ],
    "config": {
      "files": [
        {
          "path": "models/custom/yiffy.ckpt",
          "md5sum": "dbe25794e24af183565dc45e9ec99713"
        },
        {
          "path": "configs/stable-diffusion/v1-inference.yaml"
        }
      ],
      "download": [
        {
          "file_name": "yiffy.ckpt",
          "file_path": "models/custom",
          "file_url": "https://sexy.canine.wf/file/yiffy-ckpt/yiffy-e18.ckpt"
        }
      ]
    },
    "available": false
  },
  "Zack3D": {
    "name": "Zack3D",
    "type": "ckpt",
    "description": "Kink/NSFW oriented furry styled generations.",
    "version": "1",
    "style": "furry",
    "nsfw": true,
    "download_all": true,
    "requires": [
      "clip-vit-large-patch14"
    ],
    "config": {
      "files": [
        {
          "path": "models/custom/Zack3D.ckpt",
          "md5sum": "aa944b1ecdaac60113027a0fdcda4f1b"
        },
        {
          "path": "configs/stable-diffusion/v1-inference.yaml"
        }
      ],
      "download": [
        {
          "file_name": "Zack3D.ckpt",
          "file_path": "models/custom",
          "file_url": "https://sexy.canine.wf/file/furry-ckpt/Zack3D_Kinky-v1.ckpt"
        }
      ]
    },
    "available": false
  },
  "trinart": {
    "name": "trinart",
    "type": "ckpt",
    "description": "Manga styled generations.",
    "version": "1",
    "style": "anime",
    "nsfw": false,
    "download_all": true,
    "requires": [
      "clip-vit-large-patch14"
    ],
    "config": {
      "files": [
        {
          "path": "models/custom/trinart.ckpt"
        },
        {
          "path": "configs/stable-diffusion/v1-inference.yaml"
        }
      ],
      "download": [
        {
          "file_name": "trinart.ckpt",
          "file_path": "models/custom",
          "file_url": "https://huggingface.co/naclbit/trinart_stable_diffusion_v2/resolve/main/trinart2_step95000.ckpt"
        }
      ]
    },
    "available": false
  },
  "RealESRGAN_x4plus": {
    "name": "RealESRGAN_x4plus",
    "type": "realesrgan",
    "description": "Upscaler.",
    "version": "0.1.0",
    "style": "generalist",
    "config": {
      "files": [
        {
          "path": "models/realesrgan/RealESRGAN_x4plus.pth"
        }
      ],
      "download": [
        {
          "file_name": "RealESRGAN_x4plus.pth",
          "file_path": "models/realesrgan",
          "file_url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth"
        }
      ]
    },
    "available": false
  },
  "RealESRGAN_x4plus_anime_6B": {
    "name": "RealESRGAN_x4plus_anime_6B",
    "type": "realesrgan",
    "description": "Anime focused upscaler.",
    "version": "0.2.2.4",
    "style": "anime",
    "config": {
      "files": [
        {
          "path": "models/realesrgan/RealESRGAN_x4plus_anime_6B.pth"
        }
      ],
      "download": [
        {
          "file_name": "RealESRGAN_x4plus_anime_6B.pth",
          "file_path": "models/realesrgan",
          "file_url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth"
        }
      ]
    },
    "available": false
  },
  "GFPGAN": {
    "name": "GFPGAN",
    "type": "gfpgan",
    "description": "Face correction.",
    "version": "1.4",
    "style": "generalist",
    "config": {
      "files": [
        {
          "path": "models/gfpgan/GFPGANv1.4.pth"
        },
        {
          "path": "gfpgan/weights/detection_Resnet50_Final.pth"
        },
        {
          "path": "gfpgan/weights/parsing_parsenet.pth"
        }
      ],
      "download": [
        {
          "file_name": "GFPGANv1.4.pth",
          "file_path": "models/gfpgan",
          "file_url": "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/GFPGANv1.4.pth"
        },
        {
          "file_name": "detection_Resnet50_Final.pth",
          "file_path": "./gfpgan/weights",
          "file_url": "https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth"
        },
        {
          "file_name": "parsing_parsenet.pth",
          "file_path": "./gfpgan/weights",
          "file_url": "https://github.com/xinntao/facexlib/releases/download/v0.2.2/parsing_parsenet.pth"
        }
      ]
    },
    "available": false
  },
  "LDSR": {
    "name": "LDSR",
    "type": "ckpt",
    "description": "Upscaler.",
    "version": "1",
    "style": "generalist",
    "nsfw": false,
    "download_all": true,
    "config": {
      "files": [
        {
          "path": "models/ldsr/model.ckpt"
        },
        {
          "path": "models/ldsr/project.yaml"
        }
      ],
      "download": [
        {
          "file_name": "model.ckpt",
          "file_path": "models/ldsr",
          "file_url": "https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1"
        },
        {
          "file_name": "project.yaml",
          "file_path": "models/ldsr",
          "file_url": "https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1"
        }
      ]
    },
    "available": false
  },
  "BLIP": {
    "name": "BLIP",
    "type": "blip",
    "config": {
      "files": [
        {
          "path": "models/blip/model__base_caption.pth"
        }
      ],
      "download": [
        {
          "file_name": "model__base_caption.pth",
          "file_path": "models/blip",
          "file_url": "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_base_caption.pth"
        }
      ]
    },
    "available": false
  },
  "ViT-L/14": {
    "name": "ViT-L/14",
    "type": "clip",
    "config": {
      "files": [
        {
          "path": "models/clip/ViT-L-14.pt"
        }
      ],
      "download": [
        {
          "file_name": "ViT-L-14.pt",
          "file_path": "./models/clip",
          "file_url": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"
        }
      ]
    },
    "available": false
  },
  "ViT-g-14": {
    "name": "ViT-g-14",
    "pretrained_name": "laion2b_s12b_b42k",
    "type": "open_clip",
    "config": {
      "files": [
        {
          "path": "models/clip/models--laion--CLIP-ViT-g-14-laion2B-s12B-b42K/"
        }
      ],
      "download": [
        {
          "file_name": "main",
          "file_path": "./models/clip/models--laion--CLIP-ViT-g-14-laion2B-s12B-b42K/refs",
          "file_content": "b36bdd32483debcf4ed2f918bdae1d4a46ee44b8"
        },
        {
          "file_name": "6aac683f899159946bc4ca15228bb7016f3cbb1a2c51f365cba0b23923f344da",
          "file_path": "./models/clip/models--laion--CLIP-ViT-g-14-laion2B-s12B-b42K/blobs",
          "file_url": "https://huggingface.co/laion/CLIP-ViT-g-14-laion2B-s12B-b42K/resolve/main/open_clip_pytorch_model.bin"
        },
        {
          "file_name": "open_clip_pytorch_model.bin",
          "file_path": "./models/clip/models--laion--CLIP-ViT-g-14-laion2B-s12B-b42K/snapshots/b36bdd32483debcf4ed2f918bdae1d4a46ee44b8",
          "symlink": "./models/clip/models--laion--CLIP-ViT-g-14-laion2B-s12B-b42K/blobs/6aac683f899159946bc4ca15228bb7016f3cbb1a2c51f365cba0b23923f344da"
        }
      ]
    },
    "available": false
  },
  "ViT-H-14": {
    "name": "ViT-H-14",
    "pretrained_name": "laion2b_s32b_b79k",
    "type": "open_clip",
    "config": {
      "files": [
        {
          "path": "models/clip/models--laion--CLIP-ViT-H-14-laion2B-s32B-b79K/"
        }
      ],
      "download": [
        {
          "file_name": "main",
          "file_path": "./models/clip/models--laion--CLIP-ViT-H-14-laion2B-s32B-b79K/refs",
          "file_content": "58a1e03a7acfacbe6b95ebc24ae0394eda6a14fc"
        },
        {
          "file_name": "9a78ef8e8c73fd0df621682e7a8e8eb36c6916cb3c16b291a082ecd52ab79cc4",
          "file_path": "./models/clip/models--laion--CLIP-ViT-H-14-laion2B-s32B-b79K/blobs",
          "file_url": "https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K/resolve/main/open_clip_pytorch_model.bin"
        },
        {
          "file_name": "open_clip_pytorch_model.bin",
          "file_path": "./models/clip/models--laion--CLIP-ViT-H-14-laion2B-s32B-b79K/snapshots/58a1e03a7acfacbe6b95ebc24ae0394eda6a14fc",
          "symlink": "./models/clip/models--laion--CLIP-ViT-H-14-laion2B-s32B-b79K/blobs/9a78ef8e8c73fd0df621682e7a8e8eb36c6916cb3c16b291a082ecd52ab79cc4"
        }
      ]
    },
    "available": false
  },
  "diffusers_stable_diffusion": {
    "name": "diffusers_stable_diffusion",
    "type": "diffusers",
    "requires": [
      "clip-vit-large-patch14"
    ],
    "config": {
      "files": [
        {
          "path": "models/diffusers/"
        }
      ],
      "download": [
        {
          "file_name": "diffusers_stable_diffusion",
          "file_url": "https://{username}:{password}@huggingface.co/CompVis/stable-diffusion-v1-4.git",
          "git": true,
          "hf_auth": true,
          "post_process": [
            {
              "delete": "models/diffusers/stable-diffusion-v1-4/.git"
            }
          ]
        }
      ]

    },
    "available": false
  }
}

db_dep.json (deleted file)
@@ -1,98 +0,0 @@
{
  "sd-concepts-library": {
    "type": "dependency",
    "optional": true,
    "config": {
      "files": [
        {
          "path": "models/custom/sd-concepts-library/"
        }
      ],
      "download": [
        {
          "file_name": "sd-concepts-library",
          "file_path": "./models/custom/sd-concepts-library/",
          "file_url": "https://github.com/sd-webui/sd-concepts-library/archive/refs/heads/main.zip",
          "unzip": true,
          "move_subfolder": "sd-concepts-library"
        }
      ]
    },
    "available": false
  },
  "clip-vit-large-patch14": {
    "type": "dependency",
    "optional": false,
    "config": {
      "files": [
        {
          "path": "models/clip-vit-large-patch14/config.json"
        },
        {
          "path": "models/clip-vit-large-patch14/merges.txt"
        },
        {
          "path": "models/clip-vit-large-patch14/preprocessor_config.json"
        },
        {
          "path": "models/clip-vit-large-patch14/pytorch_model.bin"
        },
        {
          "path": "models/clip-vit-large-patch14/special_tokens_map.json"
        },
        {
          "path": "models/clip-vit-large-patch14/tokenizer.json"
        },
        {
          "path": "models/clip-vit-large-patch14/tokenizer_config.json"
        },
        {
          "path": "models/clip-vit-large-patch14/vocab.json"
        }
      ],
      "download": [
        {
          "file_name": "config.json",
          "file_path": "models/clip-vit-large-patch14",
          "file_url": "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/config.json"
        },
        {
          "file_name": "merges.txt",
          "file_path": "models/clip-vit-large-patch14",
          "file_url": "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/merges.txt"
        },
        {
          "file_name": "preprocessor_config.json",
          "file_path": "models/clip-vit-large-patch14",
          "file_url": "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/preprocessor_config.json"
        },
        {
          "file_name": "pytorch_model.bin",
          "file_path": "models/clip-vit-large-patch14",
          "file_url": "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/pytorch_model.bin"
        },
        {
          "file_name": "special_tokens_map.json",
          "file_path": "models/clip-vit-large-patch14",
          "file_url": "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/special_tokens_map.json"
        },
        {
          "file_name": "tokenizer.json",
          "file_path": "models/clip-vit-large-patch14",
          "file_url": "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/tokenizer.json"
        },
        {
          "file_name": "tokenizer_config.json",
          "file_path": "models/clip-vit-large-patch14",
          "file_url": "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/tokenizer_config.json"
        },
        {
          "file_name": "vocab.json",
          "file_path": "models/clip-vit-large-patch14",
          "file_url": "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/vocab.json"
        }
      ]
    },
    "available": false
  }
}

docs/1.Installation/1.one-click-installer.md (new file)
@@ -0,0 +1,23 @@
# Installer

---

### Sygil WebUI

Home Page: https://github.com/Sygil-Dev/sygil-webui

### Installation on Windows:

- Clone or download the code from the [Repository](https://github.com/Sygil-Dev/sygil-webui).

- Double-click the `installer/install.bat` file and wait for it to handle everything for you.

### Installation on Linux:

- Clone or download the code from the [Repository](https://github.com/Sygil-Dev/sygil-webui).

- Open a terminal in the folder where the code is located and run `./installer/install.sh`; make sure it has the right permissions and can be executed.

- Wait for the installer to handle everything for you.

After installation, you can run the `webui.cmd` file (on Windows) or `webui.sh` file (on Linux/Mac) to start the WebUI.
@@ -1,6 +1,5 @@
---

title: Windows Installation
title: Manual Windows Installation
---

<!--
@@ -23,7 +22,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.

# Initial Setup

> This is a windows guide. [To install on Linux, see this page.](2.linux-installation.md)
> This is a windows guide. [To install on Linux, see this page.](3.linux-installation.md)

## Pre requisites

@@ -1,6 +1,7 @@
---
title: Linux Installation
title: Manual Linux Installation
---

<!--
This file is part of sygil-webui (https://github.com/Sygil-Dev/sygil-webui/).

@@ -20,22 +21,25 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
-->

### The definitive Stable Diffusion WebUI experience ™ Now 100% Linux Compatible!

#### Created by [Joshua Kimsey](https://github.com/JoshuaKimsey)

> This is a Linux guide. [To install on Windows, see this page.](1.windows-installation.md)
> This is a Linux guide. [To install on Windows, see this page](2.windows-installation.md).

**Note: This guide describes how to use the automated installation script created by [Joshua Kimsey](https://github.com/JoshuaKimsey). It is my personal recommendation to use this script as it will automatically handle downloading and updating the repo for you. This is not a requirement to running the Stable Diffusion WebUI on Linux, but you will need to install your own conda environment manually and then run `webui.sh` to run the Stable Diffusion WebUI.**

## Features

- Automates the process of installing and running hlky's fork of Stable Diffusion with the WebUI for Linux-based OS users.
- Handles updating from the hlky fork automatically if the users wishes to do so.
- Allows the user to preset their configs for running their setup (Gradio version only).

## Initial Start Guide

**Note:** This guide assumes you have installed Anaconda already, and have it set up properly. If you have not, please visit the [Anaconda](https://www.anaconda.com/products/distribution) website to download the file for your system and install it.

**WARNING: Multiple Linux users have reported issues using this script, and potentially Stable Diffusion in general, with Miniconda. As such, I can not recommend using it due to these issues with unknown causes. Please use the full release of Anaconda instead.**


**Step 1:** Create a folder/directory on your system and place this [script](https://github.com/JoshuaKimsey/Linux-StableDiffusion-Script/blob/main/linux-sd.sh) in it, named `linux-sd.sh`. This directory will be where the files for Stable Diffusion will be downloaded.

**Step 2:** Download the 1.4 AI model from HuggingFace (or another location, the original guide has some links to mirrors of the model) and place it in the same directory as the script.
@@ -49,11 +53,11 @@
**If you are running low on storage space, you can just move the 1.4 AI models file directly to this directory, it will not be deleted, simply moved and renamed. However my personal suggestion is to just **copy** it to the repo folder, in case you desire to delete and rebuild your Stable Diffusion build again.**

**Step 6:** Next, the script will ask if you wish to customize any of the launch arguments for the Gradio WebUI Interface. If yes, then a series of options will be presented to the user:
- Use the CPU for Extra Upscaler Models to save on VRAM
- Automatically open a new browser window or tab on first launch
- Use Optimized mode for Ultimate Stable Diffusion, which only requires 4GB of VRAM at the cost of speed
- Use Optimized Turbo which uses more VRAM than regular optimized, but is faster (Incompatible with regular optimized mode)
- Open a public xxxxx.gradi.app URL to share your interface with others (Please be careful with this, it is a potential security risk)
- Use the CPU for Extra Upscaler Models to save on VRAM
- Automatically open a new browser window or tab on first launch
- Use Optimized mode for Ultimate Stable Diffusion, which only requires 4GB of VRAM at the cost of speed
- Use Optimized Turbo which uses more VRAM than regular optimized, but is faster (Incompatible with regular optimized mode)
- Open a public xxxxx.gradi.app URL to share your interface with others (Please be careful with this, it is a potential security risk)

The user will have the ability to set these to yes or no using the menu choices.

@@ -64,17 +68,17 @@ The user will have the ability to set these to yes or no using the menu choices.
**Building the Conda environment may take upwards of 15 minutes, depending on your network connection and system specs. This is normal, just leave it be and let it finish. If you are trying to update and the script hangs at `Installing PIP Dependencies` for more than 10 minutes, you will need to `Ctrl-C` to stop the script, delete your `src` folder, and rerun `linux-sd.sh` again.**

**Step 8:** Once the conda environment has been created and the upscaler models have been downloaded, then the user is presented with a choice to choose between the Streamlit or Gradio versions of the WebUI Interface.
- Streamlit:
- Has A More Modern UI
- More Features Planned
- Will Be The Main UI Going Forward
- Currently In Active Development
- Missing Some Gradio Features
- Streamlit:
- Has A More Modern UI
- More Features Planned
- Will Be The Main UI Going Forward
- Currently In Active Development
- Missing Some Gradio Features

- Gradio:
- Currently Feature Complete
- Uses An Older Interface Style
- Will Not Receive Major Updates
- Gradio:
- Currently Feature Complete
- Uses An Older Interface Style
- Will Not Receive Major Updates

**Step 9:** If everything has gone successfully, either a new browser window will open with the Streamlit version, or you should see `Running on local URL: http://localhost:7860/` in your Terminal if you launched the Gradio Interface version. Generated images will be located in the `outputs` directory inside of `sygil-webui`. Enjoy the definitive Stable Diffusion WebUI experience on Linux! :)

@@ -90,4 +94,5 @@ If the user chooses to Customize their setup, then they will be presented with t
### Refer back to the original [WebUI Github Repo](https://github.com/Sygil-Dev/sygil-webui) for useful tips and links to other resources that can improve your Stable Diffusion experience

## Planned Additions

- Investigate ways to handle Anaconda automatic installation on a user's system.

docs/2.Streamlit/1.streamlit-interface.md (new file)
@@ -0,0 +1,132 @@
# Streamlit Web UI Interface

## Features:

- Clean and easy to use UI: a clean and simple design that is easy to use.

- Support for widescreen displays.

- Dynamic Image Preview: This allows you to see how your image is looking during the generation process in real time or near real time (no more waiting until the end to see your image).

- Stop Button: A stop button to stop your generation mid-way if you do not like how your result is looking based on the image preview.

- An integrated gallery to show the generations for a prompt or session (Coming soon!)

- Better VRAM usage optimization, with fewer errors for bigger generations.

- Ability to load and use almost any `ckpt` model, or a model from HuggingFace.

- Text2Video: Generate video clips from text prompts right from the Web UI thanks to Diffusion Walk.

- Concepts Library: Easy to browse and use textual inversion library of custom embeddings other people (or yourself) have made via textual inversion.

- Textual Inversion: Allows you to create your own embeddings from images and use them in the UI.

- Actively being developed with new features being added and planned - Stay Tuned!

- Streamlit is now the primary UI for the project moving forward.

- *Currently in active development but still missing some of the features present in the Gradio Interface (Post-Processing tab).*

---

## Text To Image

![](../../images/streamlit/streamlit-t2i.png)

Streamlit Text2Image allows for a modern, but well known, Stable Diffusion textual image generation experience. Here is a quick description of some of the features of Text2Image and what they do:

- Width and Height: Control the size of the generated image (Default is 512px)
- Classifier Free Guidance (CFG): How closely the final image should follow your prompt (Default is 7.5)
- Seed: The number (or word) used to generate an image with.
- Images Per Batch: The number of images to generate consecutively (Does not affect VRAM usage)
- Number of Batches: How many images to generate at once (Very VRAM Intensive)
- Sampling Steps: The quality of the final output, higher is better (Default is 30)
- Sampling Method: Which sampler to use to generate the image (Default is `k_euler`)

---

## Image To Image

![](../../images/streamlit/streamlit-i2i.png)

Streamlit Image2Image allows you to take an image, be it generated by Stable Diffusion or otherwise, and use it as a base for another generation. This has the potential to really enhance images and fix issues with initial Text2Image generations. It also includes some built-in drawing and masking tools to help create custom generations. Some notable features of Streamlit Image2Image are:

- Image Editor Mode: Choose whether you wish to mask, crop, or uncrop the image
- Mask Mode: Allows you to decide if a drawn mask should be generated or kept
- Denoising Strength: How much of the generated image should replace the original image. (default is 75%)
- Width and Height: Control the size of the generated image (Default is 512px)
- Classifier Free Guidance (CFG): How closely the final image should follow your prompt (Default is 7.5)
- Seed: The number (or word) used to generate an image with
- Images Per Batch: The number of images to generate consecutively (Does not affect VRAM)
- Number of Batches: How many images to generate at once (Very VRAM Intensive)
- Sampling Steps: The quality of the final output, higher is better (Default is 30)
- Sampling Method: Which sampler to use to generate the image (Default is `k_euler`)

## Text To Video

---

![](../../images/streamlit/streamlit-t2v.png)

The Text2Video tab allows you to generate videos from a text prompt. It does so by using diffusion walk, which generates images using txt2img and then interpolates between them in order to walk between prompts or parts of a prompt. As of right now you can only use a single prompt, which is then duplicated based on the number of seeds you use.

#### Options:

- Width: The width in pixels for your output image.

- Height: The height in pixels for your output image.

- CFG: How closely you want CLIP to guide the generation toward your prompt.

- Seed: Seed to use during generation; can be an integer, a word, or several integers and words separated by commas. When multiple seeds are used the resulting video might be more consistent than when using a single seed.

- Max Duration in Seconds: How long your video should be in seconds.

- Custom Model: A list of models from the HuggingFace website to use; any model hosted on HuggingFace can be used for txt2vid. To add a new model to this list, edit the config file at `configs/webui/userconfig_streamlit.yaml`, find the `txt2vid` section, and add your model under `custom_models_list`, using the other models there as a reference for the format (see the sketch after this list).

- Sampling Steps: How many denoising steps to perform on each image; higher values produce an image with less noise, and the value also affects how much repetition appears in the image. For high resolutions with short prompts, and for long prompts at low resolution, you need to increase the number of sampling steps to get a good result; otherwise the amount of repetition in the image grows quickly at lower sampling step values.

- Inference Steps: Extra denoising steps performed between frames/images. These help make the video more consistent and jump around less; higher values are better but make the video take longer to generate.

- Scheduler: Scheduler to use for inference.

- Beta Schedule Type: Scheduler type to use.

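For illustration only, such an entry might look like the following in `userconfig_streamlit.yaml`. The exact key layout here is an assumption based on the description above, so check the existing entries in your own config for the authoritative format:

```yaml
txt2vid:
    # custom_models_list holds the HuggingFace model ids selectable in the Text2Video tab.
    custom_models_list:
        - "CompVis/stable-diffusion-v1-4"   # assumed example entry; any model hosted on HuggingFace should work
```
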
## SD Concepts Library

---

![](../../images/streamlit/streamlit-concepts.png)

The Concept Library allows for the easy usage of custom textual inversion models. These models may be loaded into `models/custom/sd-concepts-library` and will appear in the Concepts Library in Streamlit. To use one of these custom models in a prompt, either copy it using the button on the model, or type `<model-name>` in the prompt where you wish to use it.

Please see the [Concepts Library](https://github.com/Sygil-Dev/sygil-webui/blob/master/docs/7.concepts-library.md) section to learn more about how to use these tools.

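For example, assuming a hypothetical concept folder named `my-style` exists under `models/custom/sd-concepts-library`, a prompt using it could look like:

```
A watercolor painting of a lighthouse in the style of <my-style>
```
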
## Textual Inversion

---

Textual Inversion allows you to create your own embedding files by training the model on a few images to teach it a new concept based on what is already in the original model; this is fast and does not need many resources. You can train a new concept to use in the Concepts Library with Textual Inversion, and once you finish training it you can then use it from the Concepts Library.

## Model Manager

---

![](../../images/streamlit/streamlit-model-manager.png)

The Model Manager page allows you to download models with the click of a button. Instead of having to manually find the download link for a model and then work out where to put it on your disk for the app to detect it, the Model Manager page does most of the heavy lifting for you.

## Tools

This section is a work in progress.

The idea behind it is to provide extra tools for advanced users to do things they need that are not possible with the other, normal pages. This is intended mainly for developers or technical people wanting to do more than just basic stuff with the UI, or even to integrate their own things into it.

## Settings

---

![](../../images/streamlit/streamlit-settings.png)

The Settings page allows you to customize how you want Streamlit to run. These changes are saved to a personalized user settings file at `configs/webui/userconfig_streamlit.yaml`; this file is independent from the defaults file located at `configs/webui/webui_streamlit.yaml` and will not be reset after updates unless a major change that would break some options is released, in which case it is reset to the default values and you will need to configure your settings on the Settings page again. After editing settings on the Settings page, scroll down and hit the `Save` button for them to take effect; you can also use the `Reset` button to manually reset your settings to the defaults in case you made a mistake. Feel free to experiment with the settings and test which ones work best for you.

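As a rough illustration, a user override file might carry just a handful of keys from the defaults shown in the `webui_streamlit.yaml` hunks earlier in this diff. The assumption here is that the user file is merged over the defaults; the key names below come from those hunks:

```yaml
general:
    default_theme: light         # override the shipped "dark" default
    update_preview_frequency: 10 # how often the live preview refreshes
    outdir: outputs              # where generated images are written
```
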
@ -1,25 +1,13 @@
|
||||
---
|
||||
title: Gradio Web UI Interface
|
||||
---
|
||||
<!--
|
||||
This file is part of sygil-webui (https://github.com/Sygil-Dev/sygil-webui/).
|
||||
# Gradio Web UI Interface
|
||||
|
||||
Copyright 2022 Sygil-Dev team.
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
:::danger Warning
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
The Gradio WebUI is considered legacy or deprecated, it is no longer being maintained and it lacks several features compared to new versions. Always try to use the most recent version of our UI when possible.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
-->
|
||||
:::
|
||||
|
||||
### Gradio Web UI Features:
|
||||
|
||||
- Older UI design that is fully functional and feature complete.
|
||||
- Has access to all upscaling models, including LSDR.
|
||||
- Dynamic prompt entry automatically changes your generation settings based on `--params` in a prompt.
|
||||
@ -31,20 +19,19 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
To launch the Gradio Web UI, you will need to do the following:
|
||||
|
||||
- Windows:
|
||||
- Open your command line in the repo folder and run the `webui.cmd` file.
|
||||
- Open your command line in the repo folder and run the `webui.cmd` file.
|
||||
- Linux:
|
||||
- Open your terminal to the repo folder and run `webui.sh`, then press `2` when prompted.
|
||||
- Open your terminal to the repo folder and run `webui.sh`, then press `2` when prompted.
|
||||
- Manually:
|
||||
- Open your terminal to the repo folder.
|
||||
- Activate the conda environment using `conda activate ldm`
|
||||
- Run the command `python scripts/relauncher.py`
|
||||
- Open your terminal to the repo folder.
|
||||
- Activate the conda environment using `conda activate ldm`
|
||||
- Run the command `python scripts/relauncher.py`
|
||||
|
||||
Once the Gradio Web UI launches, a link will appear in your command line or terminal, click or copy and paste that link into your browser to access the interface.
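
Put together, the manual route is just the following (a sketch; it assumes the `ldm` conda environment was already created during the initial setup):

```bash
cd sygil-webui               # your clone of the repository
conda activate ldm           # environment created by the setup scripts
python scripts/relauncher.py # launches (and auto-relaunches) the Gradio UI
```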

## Text2Image

---

![](../images/gradio/gradio-t2i.png)
![](../../images/gradio/gradio-t2i.png)

Gradio Text2Image allows for the classic and well-known Stable Diffusion text-to-image generation. Here is a quick description of some of the features of Text2Image and what they do:

@ -59,50 +46,52 @@ Gradio Text2Image allows for the classic and well known Stable Diffusion Textual

- Send to Image Lab: Send the image to the Image Lab for Enhancement and Upscaling.

## Image2Image

---

![](../images/gradio/gradio-i2i.png)
![](../../images/gradio/gradio-i2i.png)

Gradio Image2Image allows for you to take an image, be it generated by Stable Diffusion or otherwise, and use it as a base for another geenration. This has the potential to really enhance images and fix issues with initial Text2Image generations. It also includes some built-in drawing and masking tools to help create custom generations. Some notable features of Gradio Image2Image are:
Gradio Image2Image allows for you to take an image, be it generated by Stable Diffusion or otherwise, and use it as a base for another generation. This has the potential to really enhance images and fix issues with initial Text2Image generations. It also includes some built-in drawing and masking tools to help create custom generations. Some notable features of Gradio Image2Image are:

- Image Editor Mode: Choose whether you wish to mask, crop, or uncrop the image
- Mask Mode: Alloows you to decide if a drawn mask should be generated or kept
- Mask Mode: Allows you to decide if a drawn mask should be generated or kept
- Denoising Strength: How much of the generated image should replace the original image. (default is 70%)
- Width and Height: Control the size of the generated image (Default is 512px)
- Classifer Free Guidance (CFG): How closely the final image should follow your prompt (Default is 7.5)
- Classifier Free Guidance (CFG): How closely the final image should follow your prompt (Default is 7.5)
- Seed: The number (or word) used to generate an image with
- Images Per Batch: The number of images to generate consecutively (Does not affect VRAM)
- Number of Batches: How many images to generate at once (Very VRAM Intensive)
- Sampling Steps: The quality of the final output, higher is better with dimiishing returns (Default is 50)
- Sampling Steps: The quality of the final output, higher is better (Default is 50)
- Sampling Method: Which sampler to use to generate the image (Default is `k_lms`)

## Image Lab

---

![](../images/gradio/gradio-upscale.png)
![](../../images/gradio/gradio-upscale.png)

The Gradio Image Lab is a central location to access image enhancers and upscalers. Though some options are available in all tabs (GFPGAN and realESRGAN), the Image Lab is where all of these tools may be easily accessed. These upscalers can be used for geenrated images sent to the lab, or on other images uploaded to it. The tools included here are:
The Gradio Image Lab is a central location to access image enhancers and upscalers. Though some options are available in all tabs (GFPGAN and realESRGAN), the Image Lab is where all of these tools may be easily accessed. These upscalers can be used for generated images sent to the lab, or on other images uploaded to it. The tools included here are:

- GFPGAN: Fixes and enhances faces
- realESRGAN: A 4x upscaler that uses a GAN to achieve its results
- GoBig: A 2x upsclaer that uses realESRGAN, but preserves more detail
- LSDR: A 4x upsclaer that uses Latent Diffusion, preserving a lot more detail at the cost of speed and VRAM
- GoBig: A 2x upscaler that uses realESRGAN, but preserves more detail
- LSDR: A 4x upscaler that uses Latent Diffusion, preserving a lot more detail at the cost of speed and VRAM
- GoLatent: Uses LSDR to do a 4x upscale, then GoBig to make a final 8x upscale with great detail preservation.

Please see the [Image Enhancers](6.image_enhancers.md) section to learn more about how to use these tools.

Please see the [Image Enhancers](../4.post-processing.md) section to learn more about how to use these tools.

## Scene2Image

---

![](../images/gradio/gradio-s2i.png)
![](../../images/gradio/gradio-s2i.png)

Gradio Scene2Image allows you to define layers of images in a markdown-like syntax.

> Would it be possible to have a layers system where we could do have
foreground, mid, and background objects which relate to one another and
share the style? So we could say generate a landscape, one another layer
generate a castle, and on another layer generate a crowd of people.
> foreground, mid, and background objects which relate to one another and
> share the style? So we could say generate a landscape, one another layer
> generate a castle, and on another layer generate a crowd of people.

You write a multi-line prompt that looks like markdown, where each section declares one layer.
It is hierarchical, so each layer can have its own child layers.

@ -143,8 +132,8 @@ Walking forwards one depth unit in the input image corresponds to a position `0,
Walking to the right is something like `1,0,0`.
Going downwards is then `0,1,0`.


## Gradio Optional Customizations

---

Gradio allows for a number of possible customizations via command-line arguments/terminal parameters. If you are running these manually, they need to be passed like this: `python scripts/webui.py --param`. Otherwise, you may add your own parameter customizations to `scripts/relauncher.py`, the program that automatically relaunches the Gradio interface should a crash happen.

@ -170,6 +159,7 @@ share = False
# Generate tiling images
tiling = False
```

Setting any of these to `True` will enable those parameters on every launch. Alternatively, if you wish to enable a `--parameter` not listed here, you can enter your own custom ones in this field inside of `scripts/relauncher.py`:

```
@ -178,6 +168,7 @@ additional_arguments = ""
```
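
For instance, to apply two of the flags documented below on every (re)launch, that field could be set like this (values are illustrative):

```python
# scripts/relauncher.py -- extra flags appended to each webui.py launch
additional_arguments = "--tiling --max-jobs 2"
```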

## List of command line options

---

This is the full set of optional parameters you can launch the Gradio interface with.

@ -241,5 +232,4 @@ optional arguments:
  --no-job-manager      Don't use the experimental job manager on top of gradio (default: False)
  --max-jobs MAX_JOBS   Maximum number of concurrent 'generate' commands (default: 1)
  --tiling              Generate tiling images (default: False)

```
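
As a concrete sketch, the same flags can also be passed on a one-off manual launch:

```bash
python scripts/webui.py --tiling --max-jobs 2
```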

@ -1,5 +1,5 @@
---
title: Upscalers
title: Post Processing
---
<!--
This file is part of sygil-webui (https://github.com/Sygil-Dev/sygil-webui/).
@ -30,21 +30,24 @@ Included with both versions of the Web UI Interface are a series of image restor

GFPGAN is designed to help restore faces in Stable Diffusion outputs. If you have ever tried to generate images with people in them, you know why a face restorer can come in handy. GFPGAN uses its own GAN to detect and restore the faces of subjects within an image. It greatly helps to enhance the details in human faces, while also fixing issues with asymmetry or awkward-looking eyes.

If you want to use GFPGAN to improve generated faces, you need to download the models for it seperately if you are on Windows or doing so manually on Linux.
If you want to use GFPGAN to improve generated faces, you need to download the models for it separately if you are on Windows or doing so manually on Linux.
Download [GFPGANv1.3.pth](https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth) and put it
into the `/sygil-webui/models/gfpgan` directory after you have set up the conda environment for the first time.
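
From a terminal, that amounts to something like (a sketch; the URL is the release asset linked above):

```bash
wget -P sygil-webui/models/gfpgan/ \
     https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth
```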

## RealESRGAN

---
![](../images/RealESRGAN.png)

RealESRGAN is a 4x upscaler built into both versions of the Web UI interface. It uses its own GAN to upscale images while retaining details of an image. Two different versions of realESRGAN can be used, `RealESRGAN 4X` and `RealESRGAN 4X Anime`. Despite the name, don't hesitate to try either version when upscaling an image to see which works bert for a given output.
![](../images/RealESRGAN.png)

If you want to use RealESRGAN to upscale your images, you need to download the models for it seperately if you are on Windows or doing so manually on Linux.
RealESRGAN is a 4x upscaler built into both versions of the Web UI interface. It uses its own GAN to upscale images while retaining details of an image. Two different versions of RealESRGAN can be used, `RealESRGAN 4X` and `RealESRGAN 4X Anime`. Despite the name, don't hesitate to try either version when upscaling an image to see which works best for a given output.

If you want to use RealESRGAN to upscale your images, you need to download the models for it separately if you are on Windows or doing so manually on Linux.
Download [RealESRGAN_x4plus.pth](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth) and [RealESRGAN_x4plus_anime_6B.pth](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth).
Put them into the `sygil-webui/models/realesrgan` directory after you have set up the conda environment for the first time.
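
Again as a terminal sketch (`wget -P` saves both release assets into the target directory):

```bash
wget -P sygil-webui/models/realesrgan/ \
     https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth \
     https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth
```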

## GoBig (Gradio only currently)

---

GoBig is a 2X upscaler that uses RealESRGAN to upscale the image and then slices it into small parts; each part gets diffused further by SD to create more detail. It is great for adding and increasing detail, but it will change the composition, and it might also fix issues like eyes. The settings are similar to Image2Image with regards to the strength and seed of the generation.

@ -52,17 +55,19 @@ GoBig is a 2X upscaler that uses RealESRGAN to upscale the image and then slice

To use GoBig, you will need to download the RealESRGAN models as directed above.

## Latent Diffusion Super Resolution - LSDR (Gradio only currently)

---

LSDR is a 4X upscaler with high VRAM usage that uses a Latent Diffusion model to upscale the image. This will accentuate the details of an image, but won't change the composition. This might introduce sharpening, but it is great for textures or compositions with plenty of details. However, it is slower and will use more VRAM.

If you want to use LSDR to upscale your images, you need to download the models for it seperately if you are on Windows or doing so manually on Linux.
If you want to use LSDR to upscale your images, you need to download the models for it separately if you are on Windows or doing so manually on Linux.
Download the LDSR [project.yaml](https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1) and [model last.ckpt](https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1). Rename `last.ckpt` to `model.ckpt` and place both in the `sygil-webui/models/ldsr` directory after you have set up the conda environment for the first time.
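
As a sketch, `wget -O` can download and apply the rename in a single step (same URLs as above):

```bash
wget -O sygil-webui/models/ldsr/project.yaml "https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1"
wget -O sygil-webui/models/ldsr/model.ckpt   "https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1"
```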

## GoLatent (Gradio only currently)

---

GoLatent is an 8X upscaler with high VRAM usage. It uses GoBig to add details and then uses a Latent Diffusion (LSDR) model to upscale the image. This will result in less artifacting and sharpeninng. Use the settings to feed GoBig settings that will contribute to the result. Please note, this mode is considerably slower and uses significantly more VRAM.
GoLatent is an 8X upscaler with high VRAM usage. It uses GoBig to add details and then uses a Latent Diffusion (LSDR) model to upscale the image. This will result in fewer artifacts and less sharpening. Use the settings to feed GoBig settings that will contribute to the result. Please note, this mode is considerably slower and uses significantly more VRAM.

To use GoLatent, you will need to download the appropriate LSDR models as described above.

@ -1,114 +0,0 @@

---
title: Streamlit Web UI Interface
---
<!--
This file is part of sygil-webui (https://github.com/Sygil-Dev/sygil-webui/).

Copyright 2022 Sygil-Dev team.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
-->

**Features:**

- Clean UI with an easy-to-use design, with support for widescreen displays.
- Dynamic live preview of your generations
- Easily customizable presets right from the WebUI (Coming Soon!)
- An integrated gallery to show the generations for a prompt or session (Coming soon!)
- Better VRAM usage optimization, fewer errors for bigger generations.
- Text2Video - Generate video clips from text prompts right from the Web UI (WIP)
- Concepts Library - Run custom embeddings others have made via textual inversion.
- Actively being developed with new features being added and planned - Stay Tuned!
- Streamlit is now the new primary UI for the project moving forward.
- *Currently in active development and still missing some of the features present in the Gradio Interface.*

### Launching The Streamlit Web UI

To launch the Streamlit Web UI, you will need to do the following:

- Windows:
  - Open your command line in the repo folder and run the `webui_streamlit.cmd` file.
- Linux:
  - Open your terminal to the repo folder and run `webui.sh`, then press `1` when prompted.
- Manually:
  - Open your terminal to the repo folder.
  - Activate the conda environment using `conda activate ldm`
  - Run the command `python -m streamlit run scripts/webui_streamlit.py`

Once the Streamlit Web UI launches, a new browser tab will open with the interface. A link will also appear in your terminal to allow you to copy and paste it as needed.

## Text2Image
---

![](../images/streamlit/streamlit-t2i.png)

Streamlit Text2Image allows for a modern, but well-known, Stable Diffusion text-to-image generation experience. Here is a quick description of some of the features of Text2Image and what they do:

- Width and Height: Control the size of the generated image (Default is 512px)
- Classifier Free Guidance (CFG): How closely the final image should follow your prompt (Default is 7.5)
- Seed: The number (or word) used to generate an image with
- Images Per Batch: The number of images to generate consecutively (Does not affect VRAM)
- Number of Batches: How many images to generate at once (Very VRAM Intensive)
- Sampling Steps: The quality of the final output, higher is better with diminishing returns (Default is 30)
- Sampling Method: Which sampler to use to generate the image (Default is `k_euler`)

## Image2Image
---

![](../images/streamlit/streamlit-i2i.png)

Streamlit Image2Image allows for you to take an image, be it generated by Stable Diffusion or otherwise, and use it as a base for another generation. This has the potential to really enhance images and fix issues with initial Text2Image generations. It also includes some built-in drawing and masking tools to help create custom generations. Some notable features of Streamlit Image2Image are:

- Image Editor Mode: Choose whether you wish to mask, crop, or uncrop the image
- Mask Mode: Allows you to decide if a drawn mask should be generated or kept
- Denoising Strength: How much of the generated image should replace the original image. (default is 75%)
- Width and Height: Control the size of the generated image (Default is 512px)
- Classifier Free Guidance (CFG): How closely the final image should follow your prompt (Default is 7.5)
- Seed: The number (or word) used to generate an image with
- Images Per Batch: The number of images to generate consecutively (Does not affect VRAM)
- Number of Batches: How many images to generate at once (Very VRAM Intensive)
- Sampling Steps: The quality of the final output, higher is better with diminishing returns (Default is 30)
- Sampling Method: Which sampler to use to generate the image (Default is `k_euler`)

## Text2Video
---

![](../images/streamlit/streamlit-t2v.png)

*Insert details of how to use T2V here*
(ZeroCool needs to fill in details here of how Text2Video works)

## SD Concepts Library
---

![](../images/streamlit/streamlit-concepts.png)

The Concept Library allows for the easy usage of custom textual inversion models. These models may be loaded into `models/custom/sd-concepts-library` and will appear in the Concepts Library in Streamlit. To use one of these custom models in a prompt, either copy it using the button on the model, or type `<model-name>` in the prompt where you wish to use it.

Please see the [Concepts Library](https://github.com/Sygil-Dev/sygil-webui/blob/master/docs/7.concepts-library.md) section to learn more about how to use these tools.

## Textual Inversion
---

TBD

## Model Manager
---

TBD

## Settings
---

*This section of the Web UI is still in development*

This area allows you to customize how you want Streamlit to run. These changes will be saved to `configs/webui/userconfig_streamlit.yaml`.
9
docs/5.concepts-library.md
Normal file
@ -0,0 +1,9 @@

# SD Concepts Library

## SD Concepts Library

---

![](../images/streamlit/streamlit-concepts.png)

The Concept Library allows for the easy usage of custom textual inversion models. These models may be loaded into `models/custom/sd-concepts-library` and will appear in the Concepts Library in Streamlit. To use one of these custom models in a prompt, either copy it using the button on the model, or type `<model-name>` in the prompt where you wish to use it.
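
For example, with a concept folder named `birb-style` under `models/custom/sd-concepts-library` (the name here is a hypothetical example), a prompt could look like:

```
a watercolor painting of a lighthouse in the style of <birb-style>
```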

51
docs/6.custom-models.md
Normal file
@ -0,0 +1,51 @@

# Custom Models

<!--
This file is part of sygil-webui (https://github.com/Sygil-Dev/sygil-webui/).

Copyright 2022 Sygil-Dev team.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
-->

You can use other *versions* of Stable Diffusion, and *fine-tunes* of Stable Diffusion.

Any model with the `.ckpt` extension can be placed into the `models/custom` folder and used in the UI. The model's filename is what appears in the drop-down menu on the UI, from which you can select and use your custom model, so make sure it has a filename you can recognize.
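
A sketch of the expected layout (the checkpoint filename is a hypothetical example):

```
sygil-webui/
└── models/
    └── custom/
        └── my-finetune-v1.ckpt   # listed by this filename in the drop-down
```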

# Official models supported:

## Stable Diffusion versions:

- ### [Stable Diffusion v1-3](https://huggingface.co/CompVis/stable-diffusion-v1-3)

- ### [Stable Diffusion v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4)

- ### [Stable Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5)

## Waifu Diffusion versions:

- ### Waifu Diffusion v1.2

- ### [Waifu Diffusion v1.3](https://huggingface.co/hakurei/waifu-diffusion-v1-3)

## Trinart:

- ### [Trinart Characters](https://huggingface.co/naclbit/trinart_characters_19.2m_stable_diffusion_v1)

- ### [Trinart v2](https://huggingface.co/naclbit/trinart_stable_diffusion_v2)

## Unofficial Model List:

- ### [Rentry Stable Diffusion Model List.](https://rentry.org/sdmodels)

@ -1,20 +0,0 @@
<!--
This file is part of sygil-webui (https://github.com/Sygil-Dev/sygil-webui/).

Copyright 2022 Sygil-Dev team.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
-->

TBD
@ -1,43 +0,0 @@

---
title: Custom models
---
<!--
This file is part of sygil-webui (https://github.com/Sygil-Dev/sygil-webui/).

Copyright 2022 Sygil-Dev team.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
-->

You can use other *versions* of Stable Diffusion, and *finetunes* of Stable Diffusion.

## Stable Diffusion versions

### v1-3

### v1-4

### 🔜™️ v1-5 🔜™️

## Finetunes

## TrinArt/Trin-sama

### [v1](https://huggingface.co/naclbit/trinart_stable_diffusion)

### [v2](https://huggingface.co/naclbit/trinart_stable_diffusion_v2)

## [Waifu Diffusion](https://huggingface.co/hakurei/waifu-diffusion)

### [v2](https://storage.googleapis.com/ws-store2/wd-v1-2-full-ema.ckpt) [trimmed](https://huggingface.co/crumb/pruned-waifu-diffusion)
196
docusaurus.config.js
Normal file
@ -0,0 +1,196 @@

// @ts-check
// Note: type annotations allow type checking and IDEs autocompletion

const lightCodeTheme = require('prism-react-renderer/themes/github');
const darkCodeTheme = require('prism-react-renderer/themes/dracula');

/** @type {import('@docusaurus/types').Config} */
const config = {
  title: 'Sygil WebUI',
  tagline: 'Web-based UI for Stable Diffusion',
  url: 'https://sygil-dev.github.io',
  baseUrl: '/sygil-webui',
  onBrokenLinks: 'throw',
  onBrokenMarkdownLinks: 'warn',
  //favicon: 'images/logo.svg',

  // GitHub pages deployment config.
  // If you aren't using GitHub pages, you don't need these.
  organizationName: 'Sygil-Dev', // Usually your GitHub org/user name.
  projectName: 'sygil-webui', // Usually your repo name.
  deploymentBranch: 'gh-pages',

  // Even if you don't use internationalization, you can use this field to set useful
  // metadata like html lang. For example, if your site is Chinese, you may want
  // to replace "en" with "zh-Hans".
  i18n: {
    defaultLocale: 'en',
    locales: ['en'],
  },

  // ...
  plugins: [
    [
      // enable search using docusaurus-search-local
      require.resolve("@cmfcmf/docusaurus-search-local"),
      {
        // whether to index docs pages
        indexDocs: true,

        // Whether to also index the titles of the parent categories in the sidebar of a doc page.
        // 0 disables this feature.
        // 1 indexes the direct parent category in the sidebar of a doc page
        // 2 indexes up to two nested parent categories of a doc page
        // 3...
        //
        // Do _not_ use Infinity, the value must be a JSON-serializable integer.
        indexDocSidebarParentCategories: 1,

        // whether to index blog pages
        indexBlog: true,

        // whether to index static pages
        // /404.html is never indexed
        indexPages: false,

        // language of your documentation, see next section
        language: "en",

        // setting this to "none" will prevent the default CSS to be included. The default CSS
        // comes from autocomplete-theme-classic, which you can read more about here:
        // https://www.algolia.com/doc/ui-libraries/autocomplete/api-reference/autocomplete-theme-classic/
        // When you want to overwrite CSS variables defined by the default theme, make sure to suffix your
        // overwrites with `!important`, because they might otherwise not be applied as expected. See the
        // following comment for more information: https://github.com/cmfcmf/docusaurus-search-local/issues/107#issuecomment-1119831938.
        style: undefined,

        // The maximum number of search results shown to the user. This does _not_ affect performance of
        // searches, but simply does not display additional search results that have been found.
        maxSearchResults: 8,

        // lunr.js-specific settings
        lunr: {
          // When indexing your documents, their content is split into "tokens".
          // Text entered into the search box is also tokenized.
          // This setting configures the separator used to determine where to split the text into tokens.
          // By default, it splits the text at whitespace and dashes.
          //
          // Note: Does not work for "ja" and "th" languages, since these use a different tokenizer.
          tokenizerSeparator: /[\s\-]+/,
          // https://lunrjs.com/guides/customising.html#similarity-tuning
          //
          // This parameter controls the importance given to the length of a document and its fields. This
          // value must be between 0 and 1, and by default it has a value of 0.75. Reducing this value
          // reduces the effect of different length documents on a term's importance to that document.
          b: 0.75,
          // This controls how quickly the boost given by a common word reaches saturation. Increasing it
          // will slow down the rate of saturation and lower values result in quicker saturation. The
          // default value is 1.2. If the collection of documents being indexed have high occurrences
          // of words that are not covered by a stop word filter, these words can quickly dominate any
          // similarity calculation. In these cases, this value can be reduced to get more balanced results.
          k1: 1.2,
          // By default, we rank pages where the search term appears in the title higher than pages where
          // the search term appears in just the text. This is done by "boosting" title matches with a
          // higher value than content matches. The concrete boosting behavior can be controlled by changing
          // the following settings.
          titleBoost: 5,
          contentBoost: 1,
          tagsBoost: 3,
          parentCategoriesBoost: 2, // Only used when indexDocSidebarParentCategories > 0
        }
      },
    ],
  ],

  presets: [
    [
      'classic',
      /** @type {import('@docusaurus/preset-classic').Options} */
      ({
        docs: {
          sidebarCollapsed: false,
          sidebarPath: require.resolve('./sidebars.js'),
          // Please change this to your repo.
          // Remove this to remove the "edit this page" links.
          editUrl:
            'https://github.com/Sygil-Dev/sygil-webui/tree/main/',
        },
        blog: {
          showReadingTime: true,
          // Please change this to your repo.
          // Remove this to remove the "edit this page" links.
          editUrl:
            'https://github.com/Sygil-Dev/sygil-webui/tree/main/',
        },
        theme: {
          customCss: require.resolve('./frontend/css/docs_custom.css'),
        },
      }),
    ],
  ],

  themeConfig:
    /** @type {import('@docusaurus/preset-classic').ThemeConfig} */
    ({
      navbar: {
        title: 'Sygil WebUI',
        //logo: {
        //  alt: 'Sygil Logo',
        //  src: 'images/logo.svg',
        //},
        items: [
          {
            type: 'doc',
            docId: 'Installation/one-click-installer',
            position: 'left',
            label: 'Getting Started',
          },
          {to: '/blog', label: 'Blog', position: 'left'},
          {
            href: 'https://github.com/Sygil-Dev/sygil-webui',
            label: 'GitHub',
            position: 'right',
          },
        ],
      },
      footer: {
        style: 'dark',
        links: [
          {
            title: 'Community',
            items: [
              {
                label: 'Discord',
                href: 'https://discord.gg/ttM8Tm6wge',
              },
              {
                label: 'Twitter',
                href: 'https://twitter.com/Sygil_Dev',
              },
            ],
          },
          {
            title: 'More',
            items: [
              {
                label: 'Blog',
                to: '/blog',
              },
              {
                label: 'GitHub',
                href: 'https://github.com/Sygil-Dev/sygil-webui',
              },
            ],
          },
        ],
      },
      copyright: `Copyright © ${new Date().getFullYear()} Sygil.Dev`,
      prism: {
        theme: lightCodeTheme,
        darkTheme: darkCodeTheme,
      },
    }),
};

module.exports = config;
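
This config is consumed by the Docusaurus CLI; a typical local workflow, assuming the standard scripts of the classic template (the same `yarn install` / `yarn build` used by the deploy workflow):

```bash
yarn install   # install Docusaurus and the search plugin
yarn start     # local dev server with hot reload
yarn build     # static site emitted to ./build for the gh-pages deploy
```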

@ -17,9 +17,12 @@ name: ldm
channels:
  - pytorch
  - defaults
  - nvidia
# Psst. If you change a dependency, make sure it's mirrored in the docker requirement
# files as well.
dependencies:
  - nodejs=18.11.0
  - yarn=1.22.19
  - cudatoolkit=11.3
  - git
  - numpy=1.22.3
@ -29,5 +32,4 @@ dependencies:
  - scikit-image=0.19.2
  - torchvision=0.12.0
  - pip:
    - -r requirements.txt

    - -r requirements.txt
30
frontend/css/docs_custom.css
Normal file
@ -0,0 +1,30 @@

/**
 * Any CSS included here will be global. The classic template
 * bundles Infima by default. Infima is a CSS framework designed to
 * work well for content-centric websites.
 */

/* You can override the default Infima variables here. */
:root {
  --ifm-color-primary: #2e8555;
  --ifm-color-primary-dark: #29784c;
  --ifm-color-primary-darker: #277148;
  --ifm-color-primary-darkest: #205d3b;
  --ifm-color-primary-light: #33925d;
  --ifm-color-primary-lighter: #359962;
  --ifm-color-primary-lightest: #3cad6e;
  --ifm-code-font-size: 95%;
  --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1);
}

/* For readability concerns, you should choose a lighter palette in dark mode. */
[data-theme='dark'] {
  --ifm-color-primary: #25c2a0;
  --ifm-color-primary-dark: #21af90;
  --ifm-color-primary-darker: #1fa588;
  --ifm-color-primary-darkest: #1a8870;
  --ifm-color-primary-light: #29d5b0;
  --ifm-color-primary-lighter: #32d8b4;
  --ifm-color-primary-lightest: #4fddbf;
  --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3);
}
@ -160,7 +160,7 @@ div.gallery:hover {
/* Remove some empty spaces to make the UI more compact. */
.css-18e3th9{
    padding-left: 10px;
    padding-right: 10px;
    padding-right: 30px;
    position: unset !important; /* Fixes the layout/page going up when an expander or another item is expanded and then collapsed */
}
.css-k1vhr4{
@ -168,7 +168,7 @@ div.gallery:hover {
}
.css-ret2ud{
    padding-left: 10px;
    padding-right: 25px;
    padding-right: 30px;
    gap: initial;
    display: initial;
}

BIN
images/docusaurus.png
Normal file
After Width: | Height: | Size: 5.0 KiB |
BIN
images/favicon.ico
Normal file
After Width: | Height: | Size: 3.5 KiB |
1
images/logo.svg
Normal file
After Width: | Height: | Size: 6.3 KiB |
BIN
images/streamlit/streamlit-model-manager.png
Normal file
After Width: | Height: | Size: 48 KiB |
BIN
images/streamlit/streamlit-settings.png
Normal file
After Width: | Height: | Size: 123 KiB |
Before Width: | Height: | Size: 83 KiB After Width: | Height: | Size: 83 KiB |
171
images/undraw_docusaurus_mountain.svg
Normal file
@ -0,0 +1,171 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="1088" height="687.962" viewBox="0 0 1088 687.962">
|
||||
<title>Easy to Use</title>
|
||||
<g id="Group_12" data-name="Group 12" transform="translate(-57 -56)">
|
||||
<g id="Group_11" data-name="Group 11" transform="translate(57 56)">
|
||||
<path id="Path_83" data-name="Path 83" d="M1017.81,560.461c-5.27,45.15-16.22,81.4-31.25,110.31-20,38.52-54.21,54.04-84.77,70.28a193.275,193.275,0,0,1-27.46,11.94c-55.61,19.3-117.85,14.18-166.74,3.99a657.282,657.282,0,0,0-104.09-13.16q-14.97-.675-29.97-.67c-15.42.02-293.07,5.29-360.67-131.57-16.69-33.76-28.13-75-32.24-125.27-11.63-142.12,52.29-235.46,134.74-296.47,155.97-115.41,369.76-110.57,523.43,7.88C941.15,276.621,1036.99,396.031,1017.81,560.461Z" transform="translate(-56 -106.019)" fill="#3f3d56"/>
|
||||
<path id="Path_84" data-name="Path 84" d="M986.56,670.771c-20,38.52-47.21,64.04-77.77,80.28a193.272,193.272,0,0,1-27.46,11.94c-55.61,19.3-117.85,14.18-166.74,3.99a657.3,657.3,0,0,0-104.09-13.16q-14.97-.675-29.97-.67-23.13.03-46.25,1.72c-100.17,7.36-253.82-6.43-321.42-143.29L382,283.981,444.95,445.6l20.09,51.59,55.37-75.98L549,381.981l130.2,149.27,36.8-81.27L970.78,657.9l14.21,11.59Z" transform="translate(-56 -106.019)" fill="#f2f2f2"/>
|
||||
<path id="Path_85" data-name="Path 85" d="M302,282.962l26-57,36,83-31-60Z" opacity="0.1"/>
|
||||
<path id="Path_86" data-name="Path 86" d="M610.5,753.821q-14.97-.675-29.97-.67L465.04,497.191Z" transform="translate(-56 -106.019)" opacity="0.1"/>
|
||||
<path id="Path_87" data-name="Path 87" d="M464.411,315.191,493,292.962l130,150-132-128Z" opacity="0.1"/>
|
||||
<path id="Path_88" data-name="Path 88" d="M908.79,751.051a193.265,193.265,0,0,1-27.46,11.94L679.2,531.251Z" transform="translate(-56 -106.019)" opacity="0.1"/>
|
||||
<circle id="Ellipse_11" data-name="Ellipse 11" cx="3" cy="3" r="3" transform="translate(479 98.962)" fill="#f2f2f2"/>
|
||||
<circle id="Ellipse_12" data-name="Ellipse 12" cx="3" cy="3" r="3" transform="translate(396 201.962)" fill="#f2f2f2"/>
|
||||
<circle id="Ellipse_13" data-name="Ellipse 13" cx="2" cy="2" r="2" transform="translate(600 220.962)" fill="#f2f2f2"/>
|
||||
<circle id="Ellipse_14" data-name="Ellipse 14" cx="2" cy="2" r="2" transform="translate(180 265.962)" fill="#f2f2f2"/>
|
||||
<circle id="Ellipse_15" data-name="Ellipse 15" cx="2" cy="2" r="2" transform="translate(612 96.962)" fill="#f2f2f2"/>
|
||||
<circle id="Ellipse_16" data-name="Ellipse 16" cx="2" cy="2" r="2" transform="translate(736 192.962)" fill="#f2f2f2"/>
|
||||
<circle id="Ellipse_17" data-name="Ellipse 17" cx="2" cy="2" r="2" transform="translate(858 344.962)" fill="#f2f2f2"/>
|
||||
<path id="Path_89" data-name="Path 89" d="M306,121.222h-2.76v-2.76h-1.48v2.76H299V122.7h2.76v2.759h1.48V122.7H306Z" fill="#f2f2f2"/>
|
||||
<path id="Path_90" data-name="Path 90" d="M848,424.222h-2.76v-2.76h-1.48v2.76H841V425.7h2.76v2.759h1.48V425.7H848Z" fill="#f2f2f2"/>
|
||||
<path id="Path_91" data-name="Path 91" d="M1144,719.981c0,16.569-243.557,74-544,74s-544-57.431-544-74,243.557,14,544,14S1144,703.413,1144,719.981Z" transform="translate(-56 -106.019)" fill="#3f3d56"/>
|
||||
<path id="Path_92" data-name="Path 92" d="M1144,719.981c0,16.569-243.557,74-544,74s-544-57.431-544-74,243.557,14,544,14S1144,703.413,1144,719.981Z" transform="translate(-56 -106.019)" opacity="0.1"/>
|
||||
<ellipse id="Ellipse_18" data-name="Ellipse 18" cx="544" cy="30" rx="544" ry="30" transform="translate(0 583.962)" fill="#3f3d56"/>
|
||||
<path id="Path_93" data-name="Path 93" d="M624,677.981c0,33.137-14.775,24-33,24s-33,9.137-33-24,33-96,33-96S624,644.844,624,677.981Z" transform="translate(-56 -106.019)" fill="#ff6584"/>
|
||||
<path id="Path_94" data-name="Path 94" d="M606,690.66c0,15.062-6.716,10.909-15,10.909s-15,4.153-15-10.909,15-43.636,15-43.636S606,675.6,606,690.66Z" transform="translate(-56 -106.019)" opacity="0.1"/>
|
||||
<rect id="Rectangle_97" data-name="Rectangle 97" width="92" height="18" rx="9" transform="translate(489 604.962)" fill="#2f2e41"/>
|
||||
<rect id="Rectangle_98" data-name="Rectangle 98" width="92" height="18" rx="9" transform="translate(489 586.962)" fill="#2f2e41"/>
|
||||
<path id="Path_95" data-name="Path 95" d="M193,596.547c0,55.343,34.719,100.126,77.626,100.126" transform="translate(-56 -106.019)" fill="#3f3d56"/>
|
||||
<path id="Path_96" data-name="Path 96" d="M270.626,696.673c0-55.965,38.745-101.251,86.626-101.251" transform="translate(-56 -106.019)" fill="#6c63ff"/>
|
||||
<path id="Path_97" data-name="Path 97" d="M221.125,601.564c0,52.57,22.14,95.109,49.5,95.109" transform="translate(-56 -106.019)" fill="#6c63ff"/>
|
||||
<path id="Path_98" data-name="Path 98" d="M270.626,696.673c0-71.511,44.783-129.377,100.126-129.377" transform="translate(-56 -106.019)" fill="#3f3d56"/>
|
||||
<path id="Path_99" data-name="Path 99" d="M254.3,697.379s11.009-.339,14.326-2.7,16.934-5.183,17.757-1.395,16.544,18.844,4.115,18.945-28.879-1.936-32.19-3.953S254.3,697.379,254.3,697.379Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/>
|
||||
<path id="Path_100" data-name="Path 100" d="M290.716,710.909c-12.429.1-28.879-1.936-32.19-3.953-2.522-1.536-3.527-7.048-3.863-9.591l-.368.014s.7,8.879,4.009,10.9,19.761,4.053,32.19,3.953c3.588-.029,4.827-1.305,4.759-3.2C294.755,710.174,293.386,710.887,290.716,710.909Z" transform="translate(-56 -106.019)" opacity="0.2"/>
|
||||
<path id="Path_101" data-name="Path 101" d="M777.429,633.081c0,38.029,23.857,68.8,53.341,68.8" transform="translate(-56 -106.019)" fill="#3f3d56"/>
|
||||
<path id="Path_102" data-name="Path 102" d="M830.769,701.882c0-38.456,26.623-69.575,59.525-69.575" transform="translate(-56 -106.019)" fill="#6c63ff"/>
|
||||
<path id="Path_103" data-name="Path 103" d="M796.755,636.528c0,36.124,15.213,65.354,34.014,65.354" transform="translate(-56 -106.019)" fill="#6c63ff"/>
|
||||
<path id="Path_104" data-name="Path 104" d="M830.769,701.882c0-49.139,30.773-88.9,68.8-88.9" transform="translate(-56 -106.019)" fill="#3f3d56"/>
|
||||
<path id="Path_105" data-name="Path 105" d="M819.548,702.367s7.565-.233,9.844-1.856,11.636-3.562,12.2-.958,11.368,12.949,2.828,13.018-19.844-1.33-22.119-2.716S819.548,702.367,819.548,702.367Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/>
|
||||
<path id="Path_106" data-name="Path 106" d="M844.574,711.664c-8.54.069-19.844-1.33-22.119-2.716-1.733-1.056-2.423-4.843-2.654-6.59l-.253.01s.479,6.1,2.755,7.487,13.579,2.785,22.119,2.716c2.465-.02,3.317-.9,3.27-2.2C847.349,711.159,846.409,711.649,844.574,711.664Z" transform="translate(-56 -106.019)" opacity="0.2"/>
|
||||
<path id="Path_107" data-name="Path 107" d="M949.813,724.718s11.36-1.729,14.5-4.591,16.89-7.488,18.217-3.667,19.494,17.447,6.633,19.107-30.153,1.609-33.835-.065S949.813,724.718,949.813,724.718Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/>
|
||||
<path id="Path_108" data-name="Path 108" d="M989.228,734.173c-12.86,1.659-30.153,1.609-33.835-.065-2.8-1.275-4.535-6.858-5.2-9.45l-.379.061s1.833,9.109,5.516,10.783,20.975,1.725,33.835.065c3.712-.479,4.836-1.956,4.529-3.906C993.319,732.907,991.991,733.817,989.228,734.173Z" transform="translate(-56 -106.019)" opacity="0.2"/>
|
||||
<path id="Path_109" data-name="Path 109" d="M670.26,723.9s9.587-1.459,12.237-3.875,14.255-6.32,15.374-3.095,16.452,14.725,5.6,16.125-25.448,1.358-28.555-.055S670.26,723.9,670.26,723.9Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/>
|
||||
<path id="Path_110" data-name="Path 110" d="M703.524,731.875c-10.853,1.4-25.448,1.358-28.555-.055-2.367-1.076-3.827-5.788-4.39-7.976l-.32.051s1.547,7.687,4.655,9.1,17.7,1.456,28.555.055c3.133-.4,4.081-1.651,3.822-3.3C706.977,730.807,705.856,731.575,703.524,731.875Z" transform="translate(-56 -106.019)" opacity="0.2"/>
|
||||
<path id="Path_111" data-name="Path 111" d="M178.389,719.109s7.463-1.136,9.527-3.016,11.1-4.92,11.969-2.409,12.808,11.463,4.358,12.553-19.811,1.057-22.23-.043S178.389,719.109,178.389,719.109Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/>
|
||||
<path id="Path_112" data-name="Path 112" d="M204.285,725.321c-8.449,1.09-19.811,1.057-22.23-.043-1.842-.838-2.979-4.506-3.417-6.209l-.249.04s1.2,5.984,3.624,7.085,13.781,1.133,22.23.043c2.439-.315,3.177-1.285,2.976-2.566C206.973,724.489,206.1,725.087,204.285,725.321Z" transform="translate(-56 -106.019)" opacity="0.2"/>
|
||||
<path id="Path_113" data-name="Path 113" d="M439.7,707.337c0,30.22-42.124,20.873-93.7,20.873s-93.074,9.347-93.074-20.873,42.118-36.793,93.694-36.793S439.7,677.117,439.7,707.337Z" transform="translate(-56 -106.019)" opacity="0.1"/>
|
||||
<path id="Path_114" data-name="Path 114" d="M439.7,699.9c0,30.22-42.124,20.873-93.7,20.873s-93.074,9.347-93.074-20.873S295.04,663.1,346.616,663.1,439.7,669.676,439.7,699.9Z" transform="translate(-56 -106.019)" fill="#3f3d56"/>
|
||||
</g>
|
||||
<g id="docusaurus_keytar" transform="translate(312.271 493.733)">
|
||||
<path id="Path_40" data-name="Path 40" d="M99,52h91.791V89.153H99Z" transform="translate(5.904 -14.001)" fill="#fff" fill-rule="evenodd"/>
|
||||
<path id="Path_41" data-name="Path 41" d="M24.855,163.927A21.828,21.828,0,0,1,5.947,153a21.829,21.829,0,0,0,18.908,32.782H46.71V163.927Z" transform="translate(-3 -4.634)" fill="#3ecc5f" fill-rule="evenodd"/>
|
||||
<path id="Path_42" data-name="Path 42" d="M121.861,61.1l76.514-4.782V45.39A21.854,21.854,0,0,0,176.52,23.535H78.173L75.441,18.8a3.154,3.154,0,0,0-5.464,0l-2.732,4.732L64.513,18.8a3.154,3.154,0,0,0-5.464,0l-2.732,4.732L53.586,18.8a3.154,3.154,0,0,0-5.464,0L45.39,23.535c-.024,0-.046,0-.071,0l-4.526-4.525a3.153,3.153,0,0,0-5.276,1.414l-1.5,5.577-5.674-1.521a3.154,3.154,0,0,0-3.863,3.864L26,34.023l-5.575,1.494a3.155,3.155,0,0,0-1.416,5.278l4.526,4.526c0,.023,0,.046,0,.07L18.8,48.122a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,59.05a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,69.977a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,80.9a3.154,3.154,0,0,0,0,5.464L23.535,89.1,18.8,91.832a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,102.76a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,113.687a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,124.615a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,135.542a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,146.469a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,157.4a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,168.324a3.154,3.154,0,0,0,0,5.464l4.732,2.732A21.854,21.854,0,0,0,45.39,198.375H176.52a21.854,21.854,0,0,0,21.855-21.855V89.1l-76.514-4.782a11.632,11.632,0,0,1,0-23.219" transform="translate(-1.681 -17.226)" fill="#3ecc5f" fill-rule="evenodd"/>
|
||||
<path id="Path_43" data-name="Path 43" d="M143,186.71h32.782V143H143Z" transform="translate(9.984 -5.561)" fill="#3ecc5f" fill-rule="evenodd"/>
|
||||
<path id="Path_44" data-name="Path 44" d="M196.71,159.855a5.438,5.438,0,0,0-.7.07c-.042-.164-.081-.329-.127-.493a5.457,5.457,0,1,0-5.4-9.372q-.181-.185-.366-.367a5.454,5.454,0,1,0-9.384-5.4c-.162-.046-.325-.084-.486-.126a5.467,5.467,0,1,0-10.788,0c-.162.042-.325.08-.486.126a5.457,5.457,0,1,0-9.384,5.4,21.843,21.843,0,1,0,36.421,21.02,5.452,5.452,0,1,0,.7-10.858" transform="translate(10.912 -6.025)" fill="#44d860" fill-rule="evenodd"/>
|
||||
<path id="Path_45" data-name="Path 45" d="M153,124.855h32.782V103H153Z" transform="translate(10.912 -9.271)" fill="#3ecc5f" fill-rule="evenodd"/>
|
||||
<path id="Path_46" data-name="Path 46" d="M194.855,116.765a2.732,2.732,0,1,0,0-5.464,2.811,2.811,0,0,0-.349.035c-.022-.082-.04-.164-.063-.246a2.733,2.733,0,0,0-1.052-5.253,2.7,2.7,0,0,0-1.648.566q-.09-.093-.184-.184a2.7,2.7,0,0,0,.553-1.633,2.732,2.732,0,0,0-5.245-1.07,10.928,10.928,0,1,0,0,21.031,2.732,2.732,0,0,0,5.245-1.07,2.7,2.7,0,0,0-.553-1.633q.093-.09.184-.184a2.7,2.7,0,0,0,1.648.566,2.732,2.732,0,0,0,1.052-5.253c.023-.081.042-.164.063-.246a2.814,2.814,0,0,0,.349.035" transform="translate(12.767 -9.377)" fill="#44d860" fill-rule="evenodd"/>
|
||||
<path id="Path_47" data-name="Path 47" d="M65.087,56.891a2.732,2.732,0,0,1-2.732-2.732,8.2,8.2,0,0,0-16.391,0,2.732,2.732,0,0,1-5.464,0,13.659,13.659,0,0,1,27.319,0,2.732,2.732,0,0,1-2.732,2.732" transform="translate(0.478 -15.068)" fill-rule="evenodd"/>
|
||||
<path id="Path_48" data-name="Path 48" d="M103,191.347h65.565a21.854,21.854,0,0,0,21.855-21.855V93H124.855A21.854,21.854,0,0,0,103,114.855Z" transform="translate(6.275 -10.199)" fill="#ffff50" fill-rule="evenodd"/>
|
||||
<path id="Path_49" data-name="Path 49" d="M173.216,129.787H118.535a1.093,1.093,0,1,1,0-2.185h54.681a1.093,1.093,0,0,1,0,2.185m0,21.855H118.535a1.093,1.093,0,1,1,0-2.186h54.681a1.093,1.093,0,0,1,0,2.186m0,21.855H118.535a1.093,1.093,0,1,1,0-2.185h54.681a1.093,1.093,0,0,1,0,2.185m0-54.434H118.535a1.093,1.093,0,1,1,0-2.185h54.681a1.093,1.093,0,0,1,0,2.185m0,21.652H118.535a1.093,1.093,0,1,1,0-2.186h54.681a1.093,1.093,0,0,1,0,2.186m0,21.855H118.535a1.093,1.093,0,1,1,0-2.186h54.681a1.093,1.093,0,0,1,0,2.186M189.585,61.611c-.013,0-.024-.007-.037-.005-3.377.115-4.974,3.492-6.384,6.472-1.471,3.114-2.608,5.139-4.473,5.078-2.064-.074-3.244-2.406-4.494-4.874-1.436-2.835-3.075-6.049-6.516-5.929-3.329.114-4.932,3.053-6.346,5.646-1.5,2.762-2.529,4.442-4.5,4.364-2.106-.076-3.225-1.972-4.52-4.167-1.444-2.443-3.112-5.191-6.487-5.1-3.272.113-4.879,2.606-6.3,4.808-1.5,2.328-2.552,3.746-4.551,3.662-2.156-.076-3.27-1.65-4.558-3.472-1.447-2.047-3.077-4.363-6.442-4.251-3.2.109-4.807,2.153-6.224,3.954-1.346,1.709-2.4,3.062-4.621,2.977a1.093,1.093,0,0,0-.079,2.186c3.3.11,4.967-1.967,6.417-3.81,1.286-1.635,2.4-3.045,4.582-3.12,2.1-.09,3.091,1.218,4.584,3.327,1.417,2,3.026,4.277,6.263,4.394,3.391.114,5.022-2.42,6.467-4.663,1.292-2,2.406-3.734,4.535-3.807,1.959-.073,3.026,1.475,4.529,4.022,1.417,2.4,3.023,5.121,6.324,5.241,3.415.118,5.064-2.863,6.5-5.5,1.245-2.282,2.419-4.437,4.5-4.509,1.959-.046,2.981,1.743,4.492,4.732,1.412,2.79,3.013,5.95,6.365,6.071l.185,0c3.348,0,4.937-3.36,6.343-6.331,1.245-2.634,2.423-5.114,4.444-5.216Z" transform="translate(7.109 -13.11)" fill-rule="evenodd"/>
|
||||
<path id="Path_50" data-name="Path 50" d="M83,186.71h43.71V143H83Z" transform="translate(4.42 -5.561)" fill="#3ecc5f" fill-rule="evenodd"/>
|
||||
<g id="Group_8" data-name="Group 8" transform="matrix(0.966, -0.259, 0.259, 0.966, 109.327, 91.085)">
|
||||
<rect id="Rectangle_3" data-name="Rectangle 3" width="92.361" height="36.462" rx="2" transform="translate(0 0)" fill="#d8d8d8"/>
|
||||
<g id="Group_2" data-name="Group 2" transform="translate(1.531 23.03)">
|
||||
<rect id="Rectangle_4" data-name="Rectangle 4" width="5.336" height="5.336" rx="1" transform="translate(16.797 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_5" data-name="Rectangle 5" width="5.336" height="5.336" rx="1" transform="translate(23.12 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_6" data-name="Rectangle 6" width="5.336" height="5.336" rx="1" transform="translate(29.444 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_7" data-name="Rectangle 7" width="5.336" height="5.336" rx="1" transform="translate(35.768 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_8" data-name="Rectangle 8" width="5.336" height="5.336" rx="1" transform="translate(42.091 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_9" data-name="Rectangle 9" width="5.336" height="5.336" rx="1" transform="translate(48.415 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_10" data-name="Rectangle 10" width="5.336" height="5.336" rx="1" transform="translate(54.739 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_11" data-name="Rectangle 11" width="5.336" height="5.336" rx="1" transform="translate(61.063 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_12" data-name="Rectangle 12" width="5.336" height="5.336" rx="1" transform="translate(67.386 0)" fill="#4a4a4a"/>
|
||||
<path id="Path_51" data-name="Path 51" d="M1.093,0H14.518a1.093,1.093,0,0,1,1.093,1.093V4.243a1.093,1.093,0,0,1-1.093,1.093H1.093A1.093,1.093,0,0,1,0,4.243V1.093A1.093,1.093,0,0,1,1.093,0ZM75,0H88.426a1.093,1.093,0,0,1,1.093,1.093V4.243a1.093,1.093,0,0,1-1.093,1.093H75a1.093,1.093,0,0,1-1.093-1.093V1.093A1.093,1.093,0,0,1,75,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
|
||||
</g>
|
||||
<g id="Group_3" data-name="Group 3" transform="translate(1.531 10.261)">
|
||||
<path id="Path_52" data-name="Path 52" d="M1.093,0H6.218A1.093,1.093,0,0,1,7.31,1.093V4.242A1.093,1.093,0,0,1,6.218,5.335H1.093A1.093,1.093,0,0,1,0,4.242V1.093A1.093,1.093,0,0,1,1.093,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
|
||||
<rect id="Rectangle_13" data-name="Rectangle 13" width="5.336" height="5.336" rx="1" transform="translate(8.299 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_14" data-name="Rectangle 14" width="5.336" height="5.336" rx="1" transform="translate(14.623 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_15" data-name="Rectangle 15" width="5.336" height="5.336" rx="1" transform="translate(20.947 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_16" data-name="Rectangle 16" width="5.336" height="5.336" rx="1" transform="translate(27.271 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_17" data-name="Rectangle 17" width="5.336" height="5.336" rx="1" transform="translate(33.594 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_18" data-name="Rectangle 18" width="5.336" height="5.336" rx="1" transform="translate(39.918 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_19" data-name="Rectangle 19" width="5.336" height="5.336" rx="1" transform="translate(46.242 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_20" data-name="Rectangle 20" width="5.336" height="5.336" rx="1" transform="translate(52.565 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_21" data-name="Rectangle 21" width="5.336" height="5.336" rx="1" transform="translate(58.888 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_22" data-name="Rectangle 22" width="5.336" height="5.336" rx="1" transform="translate(65.212 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_23" data-name="Rectangle 23" width="5.336" height="5.336" rx="1" transform="translate(71.536 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_24" data-name="Rectangle 24" width="5.336" height="5.336" rx="1" transform="translate(77.859 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_25" data-name="Rectangle 25" width="5.336" height="5.336" rx="1" transform="translate(84.183 0)" fill="#4a4a4a"/>
|
||||
</g>
|
||||
<g id="Group_4" data-name="Group 4" transform="translate(91.05 9.546) rotate(180)">
|
||||
<path id="Path_53" data-name="Path 53" d="M1.093,0H6.219A1.093,1.093,0,0,1,7.312,1.093v3.15A1.093,1.093,0,0,1,6.219,5.336H1.093A1.093,1.093,0,0,1,0,4.243V1.093A1.093,1.093,0,0,1,1.093,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
|
||||
<rect id="Rectangle_26" data-name="Rectangle 26" width="5.336" height="5.336" rx="1" transform="translate(8.299 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_27" data-name="Rectangle 27" width="5.336" height="5.336" rx="1" transform="translate(14.623 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_28" data-name="Rectangle 28" width="5.336" height="5.336" rx="1" transform="translate(20.947 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_29" data-name="Rectangle 29" width="5.336" height="5.336" rx="1" transform="translate(27.271 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_30" data-name="Rectangle 30" width="5.336" height="5.336" rx="1" transform="translate(33.594 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_31" data-name="Rectangle 31" width="5.336" height="5.336" rx="1" transform="translate(39.918 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_32" data-name="Rectangle 32" width="5.336" height="5.336" rx="1" transform="translate(46.242 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_33" data-name="Rectangle 33" width="5.336" height="5.336" rx="1" transform="translate(52.565 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_34" data-name="Rectangle 34" width="5.336" height="5.336" rx="1" transform="translate(58.889 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_35" data-name="Rectangle 35" width="5.336" height="5.336" rx="1" transform="translate(65.213 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_36" data-name="Rectangle 36" width="5.336" height="5.336" rx="1" transform="translate(71.537 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_37" data-name="Rectangle 37" width="5.336" height="5.336" rx="1" transform="translate(77.86 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_38" data-name="Rectangle 38" width="5.336" height="5.336" rx="1" transform="translate(84.183 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_39" data-name="Rectangle 39" width="5.336" height="5.336" rx="1" transform="translate(8.299 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_40" data-name="Rectangle 40" width="5.336" height="5.336" rx="1" transform="translate(14.623 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_41" data-name="Rectangle 41" width="5.336" height="5.336" rx="1" transform="translate(20.947 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_42" data-name="Rectangle 42" width="5.336" height="5.336" rx="1" transform="translate(27.271 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_43" data-name="Rectangle 43" width="5.336" height="5.336" rx="1" transform="translate(33.594 0)" fill="#4a4a4a"/>
<rect id="Rectangle_44" data-name="Rectangle 44" width="5.336" height="5.336" rx="1" transform="translate(39.918 0)" fill="#4a4a4a"/>
<rect id="Rectangle_45" data-name="Rectangle 45" width="5.336" height="5.336" rx="1" transform="translate(46.242 0)" fill="#4a4a4a"/>
<rect id="Rectangle_46" data-name="Rectangle 46" width="5.336" height="5.336" rx="1" transform="translate(52.565 0)" fill="#4a4a4a"/>
<rect id="Rectangle_47" data-name="Rectangle 47" width="5.336" height="5.336" rx="1" transform="translate(58.889 0)" fill="#4a4a4a"/>
<rect id="Rectangle_48" data-name="Rectangle 48" width="5.336" height="5.336" rx="1" transform="translate(65.213 0)" fill="#4a4a4a"/>
<rect id="Rectangle_49" data-name="Rectangle 49" width="5.336" height="5.336" rx="1" transform="translate(71.537 0)" fill="#4a4a4a"/>
<rect id="Rectangle_50" data-name="Rectangle 50" width="5.336" height="5.336" rx="1" transform="translate(77.86 0)" fill="#4a4a4a"/>
<rect id="Rectangle_51" data-name="Rectangle 51" width="5.336" height="5.336" rx="1" transform="translate(84.183 0)" fill="#4a4a4a"/>
</g>
<g id="Group_6" data-name="Group 6" transform="translate(1.531 16.584)">
<path id="Path_54" data-name="Path 54" d="M1.093,0h7.3A1.093,1.093,0,0,1,9.485,1.093v3.15A1.093,1.093,0,0,1,8.392,5.336h-7.3A1.093,1.093,0,0,1,0,4.243V1.094A1.093,1.093,0,0,1,1.093,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<g id="Group_5" data-name="Group 5" transform="translate(10.671 0)">
<rect id="Rectangle_52" data-name="Rectangle 52" width="5.336" height="5.336" rx="1" fill="#4a4a4a"/>
<rect id="Rectangle_53" data-name="Rectangle 53" width="5.336" height="5.336" rx="1" transform="translate(6.324 0)" fill="#4a4a4a"/>
<rect id="Rectangle_54" data-name="Rectangle 54" width="5.336" height="5.336" rx="1" transform="translate(12.647 0)" fill="#4a4a4a"/>
<rect id="Rectangle_55" data-name="Rectangle 55" width="5.336" height="5.336" rx="1" transform="translate(18.971 0)" fill="#4a4a4a"/>
<rect id="Rectangle_56" data-name="Rectangle 56" width="5.336" height="5.336" rx="1" transform="translate(25.295 0)" fill="#4a4a4a"/>
<rect id="Rectangle_57" data-name="Rectangle 57" width="5.336" height="5.336" rx="1" transform="translate(31.619 0)" fill="#4a4a4a"/>
<rect id="Rectangle_58" data-name="Rectangle 58" width="5.336" height="5.336" rx="1" transform="translate(37.942 0)" fill="#4a4a4a"/>
<rect id="Rectangle_59" data-name="Rectangle 59" width="5.336" height="5.336" rx="1" transform="translate(44.265 0)" fill="#4a4a4a"/>
<rect id="Rectangle_60" data-name="Rectangle 60" width="5.336" height="5.336" rx="1" transform="translate(50.589 0)" fill="#4a4a4a"/>
<rect id="Rectangle_61" data-name="Rectangle 61" width="5.336" height="5.336" rx="1" transform="translate(56.912 0)" fill="#4a4a4a"/>
<rect id="Rectangle_62" data-name="Rectangle 62" width="5.336" height="5.336" rx="1" transform="translate(63.236 0)" fill="#4a4a4a"/>
</g>
<path id="Path_55" data-name="Path 55" d="M1.094,0H8A1.093,1.093,0,0,1,9.091,1.093v3.15A1.093,1.093,0,0,1,8,5.336H1.093A1.093,1.093,0,0,1,0,4.243V1.094A1.093,1.093,0,0,1,1.093,0Z" transform="translate(80.428 0)" fill="#4a4a4a" fill-rule="evenodd"/>
</g>
<g id="Group_7" data-name="Group 7" transform="translate(1.531 29.627)">
<rect id="Rectangle_63" data-name="Rectangle 63" width="5.336" height="5.336" rx="1" transform="translate(0 0)" fill="#4a4a4a"/>
<rect id="Rectangle_64" data-name="Rectangle 64" width="5.336" height="5.336" rx="1" transform="translate(6.324 0)" fill="#4a4a4a"/>
<rect id="Rectangle_65" data-name="Rectangle 65" width="5.336" height="5.336" rx="1" transform="translate(12.647 0)" fill="#4a4a4a"/>
<rect id="Rectangle_66" data-name="Rectangle 66" width="5.336" height="5.336" rx="1" transform="translate(18.971 0)" fill="#4a4a4a"/>
<path id="Path_56" data-name="Path 56" d="M1.093,0H31.515a1.093,1.093,0,0,1,1.093,1.093V4.244a1.093,1.093,0,0,1-1.093,1.093H1.093A1.093,1.093,0,0,1,0,4.244V1.093A1.093,1.093,0,0,1,1.093,0ZM34.687,0h3.942a1.093,1.093,0,0,1,1.093,1.093V4.244a1.093,1.093,0,0,1-1.093,1.093H34.687a1.093,1.093,0,0,1-1.093-1.093V1.093A1.093,1.093,0,0,1,34.687,0Z" transform="translate(25.294 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<rect id="Rectangle_67" data-name="Rectangle 67" width="5.336" height="5.336" rx="1" transform="translate(66.003 0)" fill="#4a4a4a"/>
<rect id="Rectangle_68" data-name="Rectangle 68" width="5.336" height="5.336" rx="1" transform="translate(72.327 0)" fill="#4a4a4a"/>
<rect id="Rectangle_69" data-name="Rectangle 69" width="5.336" height="5.336" rx="1" transform="translate(84.183 0)" fill="#4a4a4a"/>
<path id="Path_57" data-name="Path 57" d="M5.336,0V1.18A1.093,1.093,0,0,1,4.243,2.273H1.093A1.093,1.093,0,0,1,0,1.18V0Z" transform="translate(83.59 2.273) rotate(180)" fill="#4a4a4a"/>
<path id="Path_58" data-name="Path 58" d="M5.336,0V1.18A1.093,1.093,0,0,1,4.243,2.273H1.093A1.093,1.093,0,0,1,0,1.18V0Z" transform="translate(78.255 3.063)" fill="#4a4a4a"/>
</g>
<rect id="Rectangle_70" data-name="Rectangle 70" width="88.927" height="2.371" rx="1.085" transform="translate(1.925 1.17)" fill="#4a4a4a"/>
<rect id="Rectangle_71" data-name="Rectangle 71" width="4.986" height="1.581" rx="0.723" transform="translate(4.1 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_72" data-name="Rectangle 72" width="4.986" height="1.581" rx="0.723" transform="translate(10.923 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_73" data-name="Rectangle 73" width="4.986" height="1.581" rx="0.723" transform="translate(16.173 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_74" data-name="Rectangle 74" width="4.986" height="1.581" rx="0.723" transform="translate(21.421 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_75" data-name="Rectangle 75" width="4.986" height="1.581" rx="0.723" transform="translate(26.671 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_76" data-name="Rectangle 76" width="4.986" height="1.581" rx="0.723" transform="translate(33.232 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_77" data-name="Rectangle 77" width="4.986" height="1.581" rx="0.723" transform="translate(38.48 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_78" data-name="Rectangle 78" width="4.986" height="1.581" rx="0.723" transform="translate(43.73 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_79" data-name="Rectangle 79" width="4.986" height="1.581" rx="0.723" transform="translate(48.978 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_80" data-name="Rectangle 80" width="4.986" height="1.581" rx="0.723" transform="translate(55.54 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_81" data-name="Rectangle 81" width="4.986" height="1.581" rx="0.723" transform="translate(60.788 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_82" data-name="Rectangle 82" width="4.986" height="1.581" rx="0.723" transform="translate(66.038 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_83" data-name="Rectangle 83" width="4.986" height="1.581" rx="0.723" transform="translate(72.599 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_84" data-name="Rectangle 84" width="4.986" height="1.581" rx="0.723" transform="translate(77.847 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_85" data-name="Rectangle 85" width="4.986" height="1.581" rx="0.723" transform="translate(83.097 1.566)" fill="#d8d8d8" opacity="0.136"/>
</g>
<path id="Path_59" data-name="Path 59" d="M146.71,159.855a5.439,5.439,0,0,0-.7.07c-.042-.164-.081-.329-.127-.493a5.457,5.457,0,1,0-5.4-9.372q-.181-.185-.366-.367a5.454,5.454,0,1,0-9.384-5.4c-.162-.046-.325-.084-.486-.126a5.467,5.467,0,1,0-10.788,0c-.162.042-.325.08-.486.126a5.457,5.457,0,1,0-9.384,5.4,21.843,21.843,0,1,0,36.421,21.02,5.452,5.452,0,1,0,.7-10.858" transform="translate(6.275 -6.025)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_60" data-name="Path 60" d="M83,124.855h43.71V103H83Z" transform="translate(4.42 -9.271)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_61" data-name="Path 61" d="M134.855,116.765a2.732,2.732,0,1,0,0-5.464,2.811,2.811,0,0,0-.349.035c-.022-.082-.04-.164-.063-.246a2.733,2.733,0,0,0-1.052-5.253,2.7,2.7,0,0,0-1.648.566q-.09-.093-.184-.184a2.7,2.7,0,0,0,.553-1.633,2.732,2.732,0,0,0-5.245-1.07,10.928,10.928,0,1,0,0,21.031,2.732,2.732,0,0,0,5.245-1.07,2.7,2.7,0,0,0-.553-1.633q.093-.09.184-.184a2.7,2.7,0,0,0,1.648.566,2.732,2.732,0,0,0,1.052-5.253c.023-.081.042-.164.063-.246a2.811,2.811,0,0,0,.349.035" transform="translate(7.202 -9.377)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_62" data-name="Path 62" d="M143.232,42.33a2.967,2.967,0,0,1-.535-.055,2.754,2.754,0,0,1-.514-.153,2.838,2.838,0,0,1-.471-.251,4.139,4.139,0,0,1-.415-.339,3.2,3.2,0,0,1-.338-.415A2.7,2.7,0,0,1,140.5,39.6a2.968,2.968,0,0,1,.055-.535,3.152,3.152,0,0,1,.152-.514,2.874,2.874,0,0,1,.252-.47,2.633,2.633,0,0,1,.753-.754,2.837,2.837,0,0,1,.471-.251,2.753,2.753,0,0,1,.514-.153,2.527,2.527,0,0,1,1.071,0,2.654,2.654,0,0,1,.983.4,4.139,4.139,0,0,1,.415.339,4.019,4.019,0,0,1,.339.415,2.786,2.786,0,0,1,.251.47,2.864,2.864,0,0,1,.208,1.049,2.77,2.77,0,0,1-.8,1.934,4.139,4.139,0,0,1-.415.339,2.722,2.722,0,0,1-1.519.459m21.855-1.366a2.789,2.789,0,0,1-1.935-.8,4.162,4.162,0,0,1-.338-.415,2.7,2.7,0,0,1-.459-1.519,2.789,2.789,0,0,1,.8-1.934,4.139,4.139,0,0,1,.415-.339,2.838,2.838,0,0,1,.471-.251,2.752,2.752,0,0,1,.514-.153,2.527,2.527,0,0,1,1.071,0,2.654,2.654,0,0,1,.983.4,4.139,4.139,0,0,1,.415.339,2.79,2.79,0,0,1,.8,1.934,3.069,3.069,0,0,1-.055.535,2.779,2.779,0,0,1-.153.514,3.885,3.885,0,0,1-.251.47,4.02,4.02,0,0,1-.339.415,4.138,4.138,0,0,1-.415.339,2.722,2.722,0,0,1-1.519.459" transform="translate(9.753 -15.532)" fill-rule="evenodd"/>
</g>
</g>
</svg>
170
images/undraw_docusaurus_react.svg
Normal file
@ -0,0 +1,170 @@
<svg xmlns="http://www.w3.org/2000/svg" width="1041.277" height="554.141" viewBox="0 0 1041.277 554.141">
<title>Powered by React</title>
<g id="Group_24" data-name="Group 24" transform="translate(-440 -263)">
<g id="Group_23" data-name="Group 23" transform="translate(439.989 262.965)">
<path id="Path_299" data-name="Path 299" d="M1040.82,611.12q-1.74,3.75-3.47,7.4-2.7,5.67-5.33,11.12c-.78,1.61-1.56,3.19-2.32,4.77-8.6,17.57-16.63,33.11-23.45,45.89A73.21,73.21,0,0,1,942.44,719l-151.65,1.65h-1.6l-13,.14-11.12.12-34.1.37h-1.38l-17.36.19h-.53l-107,1.16-95.51,1-11.11.12-69,.75H429l-44.75.48h-.48l-141.5,1.53-42.33.46a87.991,87.991,0,0,1-10.79-.54h0c-1.22-.14-2.44-.3-3.65-.49a87.38,87.38,0,0,1-51.29-27.54C116,678.37,102.75,655,93.85,629.64q-1.93-5.49-3.6-11.12C59.44,514.37,97,380,164.6,290.08q4.25-5.64,8.64-11l.07-.08c20.79-25.52,44.1-46.84,68.93-62,44-26.91,92.75-34.49,140.7-11.9,40.57,19.12,78.45,28.11,115.17,30.55,3.71.24,7.42.42,11.11.53,84.23,2.65,163.17-27.7,255.87-47.29,3.69-.78,7.39-1.55,11.12-2.28,66.13-13.16,139.49-20.1,226.73-5.51a189.089,189.089,0,0,1,26.76,6.4q5.77,1.86,11.12,4c41.64,16.94,64.35,48.24,74,87.46q1.37,5.46,2.37,11.11C1134.3,384.41,1084.19,518.23,1040.82,611.12Z" transform="translate(-79.34 -172.91)" fill="#f2f2f2"/>
<path id="Path_300" data-name="Path 300" d="M576.36,618.52a95.21,95.21,0,0,1-1.87,11.12h93.7V618.52Zm-78.25,62.81,11.11-.09V653.77c-3.81-.17-7.52-.34-11.11-.52ZM265.19,618.52v11.12h198.5V618.52ZM1114.87,279h-74V191.51q-5.35-2.17-11.12-4V279H776.21V186.58c-3.73.73-7.43,1.5-11.12,2.28V279H509.22V236.15c-3.69-.11-7.4-.29-11.11-.53V279H242.24V217c-24.83,15.16-48.14,36.48-68.93,62h-.07v.08q-4.4,5.4-8.64,11h8.64V618.52h-83q1.66,5.63,3.6,11.12h79.39v93.62a87,87,0,0,0,12.2,2.79c1.21.19,2.43.35,3.65.49h0a87.991,87.991,0,0,0,10.79.54l42.33-.46v-97H498.11v94.21l11.11-.12V629.64H765.09V721l11.12-.12V629.64H1029.7v4.77c.76-1.58,1.54-3.16,2.32-4.77q2.63-5.45,5.33-11.12,1.73-3.64,3.47-7.4v-321h76.42Q1116.23,284.43,1114.87,279ZM242.24,618.52V290.08H498.11V618.52Zm267,0V290.08H765.09V618.52Zm520.48,0H776.21V290.08H1029.7Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_301" data-name="Path 301" d="M863.09,533.65v13l-151.92,1.4-1.62.03-57.74.53-1.38.02-17.55.15h-.52l-106.98.99L349.77,551.4h-.15l-44.65.42-.48.01-198.4,1.82v-15l46.65-28,93.6-.78,2-.01.66-.01,2-.03,44.94-.37,2.01-.01.64-.01,2-.01L315,509.3l.38-.01,35.55-.3h.29l277.4-2.34,6.79-.05h.68l5.18-.05,37.65-.31,2-.03,1.85-.02h.96l11.71-.09,2.32-.03,3.11-.02,9.75-.09,15.47-.13,2-.02,3.48-.02h.65l74.71-.64Z" fill="#65617d"/>
<path id="Path_302" data-name="Path 302" d="M863.09,533.65v13l-151.92,1.4-1.62.03-57.74.53-1.38.02-17.55.15h-.52l-106.98.99L349.77,551.4h-.15l-44.65.42-.48.01-198.4,1.82v-15l46.65-28,93.6-.78,2-.01.66-.01,2-.03,44.94-.37,2.01-.01.64-.01,2-.01L315,509.3l.38-.01,35.55-.3h.29l277.4-2.34,6.79-.05h.68l5.18-.05,37.65-.31,2-.03,1.85-.02h.96l11.71-.09,2.32-.03,3.11-.02,9.75-.09,15.47-.13,2-.02,3.48-.02h.65l74.71-.64Z" opacity="0.2"/>
<path id="Path_303" data-name="Path 303" d="M375.44,656.57v24.49a6.13,6.13,0,0,1-3.5,5.54,6,6,0,0,1-2.5.6l-34.9.74a6,6,0,0,1-2.7-.57,6.12,6.12,0,0,1-3.57-5.57V656.57Z" transform="translate(-79.34 -172.91)" fill="#3f3d56"/>
<path id="Path_304" data-name="Path 304" d="M375.44,656.57v24.49a6.13,6.13,0,0,1-3.5,5.54,6,6,0,0,1-2.5.6l-34.9.74a6,6,0,0,1-2.7-.57,6.12,6.12,0,0,1-3.57-5.57V656.57Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_305" data-name="Path 305" d="M377.44,656.57v24.49a6.13,6.13,0,0,1-3.5,5.54,6,6,0,0,1-2.5.6l-34.9.74a6,6,0,0,1-2.7-.57,6.12,6.12,0,0,1-3.57-5.57V656.57Z" transform="translate(-79.34 -172.91)" fill="#3f3d56"/>
<rect id="Rectangle_137" data-name="Rectangle 137" width="47.17" height="31.5" transform="translate(680.92 483.65)" fill="#3f3d56"/>
<rect id="Rectangle_138" data-name="Rectangle 138" width="47.17" height="31.5" transform="translate(680.92 483.65)" opacity="0.1"/>
<rect id="Rectangle_139" data-name="Rectangle 139" width="47.17" height="31.5" transform="translate(678.92 483.65)" fill="#3f3d56"/>
<path id="Path_306" data-name="Path 306" d="M298.09,483.65v4.97l-47.17,1.26v-6.23Z" opacity="0.1"/>
<path id="Path_307" data-name="Path 307" d="M460.69,485.27v168.2a4,4,0,0,1-3.85,3.95l-191.65,5.1h-.05a4,4,0,0,1-3.95-3.95V485.27a4,4,0,0,1,3.95-3.95h191.6a4,4,0,0,1,3.95,3.95Z" transform="translate(-79.34 -172.91)" fill="#65617d"/>
<path id="Path_308" data-name="Path 308" d="M265.19,481.32v181.2h-.05a4,4,0,0,1-3.95-3.95V485.27a4,4,0,0,1,3.95-3.95Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_309" data-name="Path 309" d="M194.59,319.15h177.5V467.4l-177.5,4Z" fill="#39374d"/>
<path id="Path_310" data-name="Path 310" d="M726.09,483.65v6.41l-47.17-1.26v-5.15Z" opacity="0.1"/>
<path id="Path_311" data-name="Path 311" d="M867.69,485.27v173.3a4,4,0,0,1-4,3.95h0L672,657.42a4,4,0,0,1-3.85-3.95V485.27a4,4,0,0,1,3.95-3.95H863.7a4,4,0,0,1,3.99,3.95Z" transform="translate(-79.34 -172.91)" fill="#65617d"/>
<path id="Path_312" data-name="Path 312" d="M867.69,485.27v173.3a4,4,0,0,1-4,3.95h0V481.32h0a4,4,0,0,1,4,3.95Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_313" data-name="Path 313" d="M775.59,319.15H598.09V467.4l177.5,4Z" fill="#39374d"/>
<path id="Path_314" data-name="Path 314" d="M663.19,485.27v168.2a4,4,0,0,1-3.85,3.95l-191.65,5.1h0a4,4,0,0,1-4-3.95V485.27a4,4,0,0,1,3.95-3.95h191.6A4,4,0,0,1,663.19,485.27Z" transform="translate(-79.34 -172.91)" fill="#65617d"/>
<path id="Path_315" data-name="Path 315" d="M397.09,319.15h177.5V467.4l-177.5,4Z" fill="#4267b2"/>
<path id="Path_316" data-name="Path 316" d="M863.09,533.65v13l-151.92,1.4-1.62.03-57.74.53-1.38.02-17.55.15h-.52l-106.98.99L349.77,551.4h-.15l-44.65.42-.48.01-198.4,1.82v-15l202.51-1.33h.48l40.99-.28h.19l283.08-1.87h.29l.17-.01h.47l4.79-.03h1.46l74.49-.5,4.4-.02.98-.01Z" opacity="0.1"/>
<circle id="Ellipse_111" data-name="Ellipse 111" cx="51.33" cy="51.33" r="51.33" transform="translate(435.93 246.82)" fill="#fbbebe"/>
<path id="Path_317" data-name="Path 317" d="M617.94,550.07s-99.5,12-90,0c3.44-4.34,4.39-17.2,4.2-31.85-.06-4.45-.22-9.06-.45-13.65-1.1-22-3.75-43.5-3.75-43.5s87-41,77-8.5c-4,13.13-2.69,31.57.35,48.88.89,5.05,1.92,10,3,14.7a344.66,344.66,0,0,0,9.65,33.92Z" transform="translate(-79.34 -172.91)" fill="#fbbebe"/>
<path id="Path_318" data-name="Path 318" d="M585.47,546c11.51-2.13,23.7-6,34.53-1.54,2.85,1.17,5.47,2.88,8.39,3.86s6.12,1.22,9.16,1.91c10.68,2.42,19.34,10.55,24.9,20s8.44,20.14,11.26,30.72l6.9,25.83c6,22.45,12,45.09,13.39,68.3a2437.506,2437.506,0,0,1-250.84,1.43c5.44-10.34,11-21.31,10.54-33s-7.19-23.22-4.76-34.74c1.55-7.34,6.57-13.39,9.64-20.22,8.75-19.52,1.94-45.79,17.32-60.65,6.92-6.68,17-9.21,26.63-8.89,12.28.41,24.85,4.24,37,6.11C555.09,547.48,569.79,548.88,585.47,546Z" transform="translate(-79.34 -172.91)" fill="#ff6584"/>
<path id="Path_319" data-name="Path 319" d="M716.37,657.17l-.1,1.43v.1l-.17,2.3-1.33,18.51-1.61,22.3-.46,6.28-1,13.44v.17l-107,1-175.59,1.9v.84h-.14v-1.12l.45-14.36.86-28.06.74-23.79.07-2.37a10.53,10.53,0,0,1,11.42-10.17c4.72.4,10.85.89,18.18,1.41l3,.22c42.33,2.94,120.56,6.74,199.5,2,1.66-.09,3.33-.19,5-.31,12.24-.77,24.47-1.76,36.58-3a10.53,10.53,0,0,1,11.6,11.23Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_320" data-name="Path 320" d="M429.08,725.44v-.84l175.62-1.91,107-1h.3v-.17l1-13.44.43-6,1.64-22.61,1.29-17.9v-.44a10.617,10.617,0,0,0-.11-2.47.3.3,0,0,0,0-.1,10.391,10.391,0,0,0-2-4.64,10.54,10.54,0,0,0-9.42-4c-12.11,1.24-24.34,2.23-36.58,3-1.67.12-3.34.22-5,.31-78.94,4.69-157.17.89-199.5-2l-3-.22c-7.33-.52-13.46-1-18.18-1.41a10.54,10.54,0,0,0-11.24,8.53,11,11,0,0,0-.18,1.64l-.68,22.16L429.54,710l-.44,14.36v1.12Z" transform="translate(-79.34 -172.91)" fill="#3f3d56"/>
<path id="Path_321" data-name="Path 321" d="M716.67,664.18l-1.23,15.33-1.83,22.85-.46,5.72-1,12.81-.06.64v.17h0l-.15,1.48.11-1.48h-.29l-107,1-175.65,1.9v-.28l.49-14.36,1-28.06.64-18.65A6.36,6.36,0,0,1,434.3,658a6.25,6.25,0,0,1,3.78-.9c2.1.17,4.68.37,7.69.59,4.89.36,10.92.78,17.94,1.22,13,.82,29.31,1.7,48,2.42,52,2,122.2,2.67,188.88-3.17,3-.26,6.1-.55,9.13-.84a6.26,6.26,0,0,1,3.48.66,5.159,5.159,0,0,1,.86.54,6.14,6.14,0,0,1,2,2.46,3.564,3.564,0,0,1,.25.61A6.279,6.279,0,0,1,716.67,664.18Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_322" data-name="Path 322" d="M377.44,677.87v3.19a6.13,6.13,0,0,1-3.5,5.54l-40.1.77a6.12,6.12,0,0,1-3.57-5.57v-3Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_323" data-name="Path 323" d="M298.59,515.57l-52.25,1V507.9l52.25-1Z" fill="#3f3d56"/>
<path id="Path_324" data-name="Path 324" d="M298.59,515.57l-52.25,1V507.9l52.25-1Z" opacity="0.1"/>
<path id="Path_325" data-name="Path 325" d="M300.59,515.57l-52.25,1V507.9l52.25-1Z" fill="#3f3d56"/>
<path id="Path_326" data-name="Path 326" d="M758.56,679.87v3.19a6.13,6.13,0,0,0,3.5,5.54l40.1.77a6.12,6.12,0,0,0,3.57-5.57v-3Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_327" data-name="Path 327" d="M678.72,517.57l52.25,1V509.9l-52.25-1Z" opacity="0.1"/>
<path id="Path_328" data-name="Path 328" d="M676.72,517.57l52.25,1V509.9l-52.25-1Z" fill="#3f3d56"/>
<path id="Path_329" data-name="Path 329" d="M534.13,486.79c.08,7-3.16,13.6-5.91,20.07a163.491,163.491,0,0,0-12.66,74.71c.73,11,2.58,22,.73,32.9s-8.43,21.77-19,24.9c17.53,10.45,41.26,9.35,57.76-2.66,8.79-6.4,15.34-15.33,21.75-24.11a97.86,97.86,0,0,1-13.31,44.75A103.43,103.43,0,0,0,637,616.53c4.31-5.81,8.06-12.19,9.72-19.23,3.09-13-1.22-26.51-4.51-39.5a266.055,266.055,0,0,1-6.17-33c-.43-3.56-.78-7.22.1-10.7,1-4.07,3.67-7.51,5.64-11.22,5.6-10.54,5.73-23.3,2.86-34.88s-8.49-22.26-14.06-32.81c-4.46-8.46-9.3-17.31-17.46-22.28-5.1-3.1-11-4.39-16.88-5.64l-25.37-5.43c-5.55-1.19-11.26-2.38-16.87-1.51-9.47,1.48-16.14,8.32-22,15.34-4.59,5.46-15.81,15.71-16.6,22.86-.72,6.59,5.1,17.63,6.09,24.58,1.3,9,2.22,6,7.3,11.52C532,478.05,534.07,482,534.13,486.79Z" transform="translate(-79.34 -172.91)" fill="#3f3d56"/>
</g>
<g id="docusaurus_keytar" transform="translate(670.271 615.768)">
<path id="Path_40" data-name="Path 40" d="M99,52h43.635V69.662H99Z" transform="translate(-49.132 -33.936)" fill="#fff" fill-rule="evenodd"/>
<path id="Path_41" data-name="Path 41" d="M13.389,158.195A10.377,10.377,0,0,1,4.4,153a10.377,10.377,0,0,0,8.988,15.584H23.779V158.195Z" transform="translate(-3 -82.47)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_42" data-name="Path 42" d="M66.967,38.083l36.373-2.273V30.615A10.389,10.389,0,0,0,92.95,20.226H46.2l-1.3-2.249a1.5,1.5,0,0,0-2.6,0L41,20.226l-1.3-2.249a1.5,1.5,0,0,0-2.6,0l-1.3,2.249-1.3-2.249a1.5,1.5,0,0,0-2.6,0l-1.3,2.249-.034,0-2.152-2.151a1.5,1.5,0,0,0-2.508.672L25.21,21.4l-2.7-.723a1.5,1.5,0,0,0-1.836,1.837l.722,2.7-2.65.71a1.5,1.5,0,0,0-.673,2.509l2.152,2.152c0,.011,0,.022,0,.033l-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6L20.226,41l-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3A10.389,10.389,0,0,0,30.615,103.34H92.95A10.389,10.389,0,0,0,103.34,92.95V51.393L66.967,49.12a5.53,5.53,0,0,1,0-11.038" transform="translate(-9.836 -17.226)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_43" data-name="Path 43" d="M143,163.779h15.584V143H143Z" transform="translate(-70.275 -77.665)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_44" data-name="Path 44" d="M173.779,148.389a2.582,2.582,0,0,0-.332.033c-.02-.078-.038-.156-.06-.234a2.594,2.594,0,1,0-2.567-4.455q-.086-.088-.174-.175a2.593,2.593,0,1,0-4.461-2.569c-.077-.022-.154-.04-.231-.06a2.6,2.6,0,1,0-5.128,0c-.077.02-.154.038-.231.06a2.594,2.594,0,1,0-4.461,2.569,10.384,10.384,0,1,0,17.314,9.992,2.592,2.592,0,1,0,.332-5.161" transform="translate(-75.08 -75.262)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_45" data-name="Path 45" d="M153,113.389h15.584V103H153Z" transform="translate(-75.08 -58.444)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_46" data-name="Path 46" d="M183.389,108.944a1.3,1.3,0,1,0,0-2.6,1.336,1.336,0,0,0-.166.017c-.01-.039-.019-.078-.03-.117a1.3,1.3,0,0,0-.5-2.5,1.285,1.285,0,0,0-.783.269q-.043-.044-.087-.087a1.285,1.285,0,0,0,.263-.776,1.3,1.3,0,0,0-2.493-.509,5.195,5.195,0,1,0,0,10,1.3,1.3,0,0,0,2.493-.509,1.285,1.285,0,0,0-.263-.776q.044-.043.087-.087a1.285,1.285,0,0,0,.783.269,1.3,1.3,0,0,0,.5-2.5c.011-.038.02-.078.03-.117a1.337,1.337,0,0,0,.166.017" transform="translate(-84.691 -57.894)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_47" data-name="Path 47" d="M52.188,48.292a1.3,1.3,0,0,1-1.3-1.3,3.9,3.9,0,0,0-7.792,0,1.3,1.3,0,1,1-2.6,0,6.493,6.493,0,0,1,12.987,0,1.3,1.3,0,0,1-1.3,1.3" transform="translate(-21.02 -28.41)" fill-rule="evenodd"/>
<path id="Path_48" data-name="Path 48" d="M103,139.752h31.168a10.389,10.389,0,0,0,10.389-10.389V93H113.389A10.389,10.389,0,0,0,103,103.389Z" transform="translate(-51.054 -53.638)" fill="#ffff50" fill-rule="evenodd"/>
<path id="Path_49" data-name="Path 49" d="M141.1,94.017H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0,10.389H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0,10.389H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0-25.877H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0,10.293H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0,10.389H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m7.782-47.993c-.006,0-.011,0-.018,0-1.605.055-2.365,1.66-3.035,3.077-.7,1.48-1.24,2.443-2.126,2.414-.981-.035-1.542-1.144-2.137-2.317-.683-1.347-1.462-2.876-3.1-2.819-1.582.054-2.344,1.451-3.017,2.684-.715,1.313-1.2,2.112-2.141,2.075-1-.036-1.533-.938-2.149-1.981-.686-1.162-1.479-2.467-3.084-2.423-1.555.053-2.319,1.239-2.994,2.286-.713,1.106-1.213,1.781-2.164,1.741-1.025-.036-1.554-.784-2.167-1.65-.688-.973-1.463-2.074-3.062-2.021a3.815,3.815,0,0,0-2.959,1.879c-.64.812-1.14,1.456-2.2,1.415a.52.52,0,0,0-.037,1.039,3.588,3.588,0,0,0,3.05-1.811c.611-.777,1.139-1.448,2.178-1.483,1-.043,1.47.579,2.179,1.582.674.953,1.438,2.033,2.977,2.089,1.612.054,2.387-1.151,3.074-2.217.614-.953,1.144-1.775,2.156-1.81.931-.035,1.438.7,2.153,1.912.674,1.141,1.437,2.434,3.006,2.491,1.623.056,2.407-1.361,3.09-2.616.592-1.085,1.15-2.109,2.14-2.143.931-.022,1.417.829,2.135,2.249.671,1.326,1.432,2.828,3.026,2.886l.088,0c1.592,0,2.347-1.6,3.015-3.01.592-1.252,1.152-2.431,2.113-2.479Z" transform="translate(-55.378 -38.552)" fill-rule="evenodd"/>
<path id="Path_50" data-name="Path 50" d="M83,163.779h20.779V143H83Z" transform="translate(-41.443 -77.665)" fill="#3ecc5f" fill-rule="evenodd"/>
<g id="Group_8" data-name="Group 8" transform="matrix(0.966, -0.259, 0.259, 0.966, 51.971, 43.3)">
<rect id="Rectangle_3" data-name="Rectangle 3" width="43.906" height="17.333" rx="2" transform="translate(0 0)" fill="#d8d8d8"/>
<g id="Group_2" data-name="Group 2" transform="translate(0.728 10.948)">
<rect id="Rectangle_4" data-name="Rectangle 4" width="2.537" height="2.537" rx="1" transform="translate(7.985 0)" fill="#4a4a4a"/>
<rect id="Rectangle_5" data-name="Rectangle 5" width="2.537" height="2.537" rx="1" transform="translate(10.991 0)" fill="#4a4a4a"/>
<rect id="Rectangle_6" data-name="Rectangle 6" width="2.537" height="2.537" rx="1" transform="translate(13.997 0)" fill="#4a4a4a"/>
<rect id="Rectangle_7" data-name="Rectangle 7" width="2.537" height="2.537" rx="1" transform="translate(17.003 0)" fill="#4a4a4a"/>
<rect id="Rectangle_8" data-name="Rectangle 8" width="2.537" height="2.537" rx="1" transform="translate(20.009 0)" fill="#4a4a4a"/>
<rect id="Rectangle_9" data-name="Rectangle 9" width="2.537" height="2.537" rx="1" transform="translate(23.015 0)" fill="#4a4a4a"/>
<rect id="Rectangle_10" data-name="Rectangle 10" width="2.537" height="2.537" rx="1" transform="translate(26.021 0)" fill="#4a4a4a"/>
<rect id="Rectangle_11" data-name="Rectangle 11" width="2.537" height="2.537" rx="1" transform="translate(29.028 0)" fill="#4a4a4a"/>
<rect id="Rectangle_12" data-name="Rectangle 12" width="2.537" height="2.537" rx="1" transform="translate(32.034 0)" fill="#4a4a4a"/>
<path id="Path_51" data-name="Path 51" d="M.519,0H6.9A.519.519,0,0,1,7.421.52v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.519A.519.519,0,0,1,.519,0ZM35.653,0h6.383a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H35.652a.519.519,0,0,1-.519-.519V.519A.519.519,0,0,1,35.652,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
</g>
<g id="Group_3" data-name="Group 3" transform="translate(0.728 4.878)">
<path id="Path_52" data-name="Path 52" d="M.519,0H2.956a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.519A.519.519,0,0,1,.519,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<rect id="Rectangle_13" data-name="Rectangle 13" width="2.537" height="2.537" rx="1" transform="translate(3.945 0)" fill="#4a4a4a"/>
<rect id="Rectangle_14" data-name="Rectangle 14" width="2.537" height="2.537" rx="1" transform="translate(6.951 0)" fill="#4a4a4a"/>
<rect id="Rectangle_15" data-name="Rectangle 15" width="2.537" height="2.537" rx="1" transform="translate(9.958 0)" fill="#4a4a4a"/>
<rect id="Rectangle_16" data-name="Rectangle 16" width="2.537" height="2.537" rx="1" transform="translate(12.964 0)" fill="#4a4a4a"/>
<rect id="Rectangle_17" data-name="Rectangle 17" width="2.537" height="2.537" rx="1" transform="translate(15.97 0)" fill="#4a4a4a"/>
<rect id="Rectangle_18" data-name="Rectangle 18" width="2.537" height="2.537" rx="1" transform="translate(18.976 0)" fill="#4a4a4a"/>
<rect id="Rectangle_19" data-name="Rectangle 19" width="2.537" height="2.537" rx="1" transform="translate(21.982 0)" fill="#4a4a4a"/>
<rect id="Rectangle_20" data-name="Rectangle 20" width="2.537" height="2.537" rx="1" transform="translate(24.988 0)" fill="#4a4a4a"/>
<rect id="Rectangle_21" data-name="Rectangle 21" width="2.537" height="2.537" rx="1" transform="translate(27.994 0)" fill="#4a4a4a"/>
<rect id="Rectangle_22" data-name="Rectangle 22" width="2.537" height="2.537" rx="1" transform="translate(31 0)" fill="#4a4a4a"/>
<rect id="Rectangle_23" data-name="Rectangle 23" width="2.537" height="2.537" rx="1" transform="translate(34.006 0)" fill="#4a4a4a"/>
<rect id="Rectangle_24" data-name="Rectangle 24" width="2.537" height="2.537" rx="1" transform="translate(37.012 0)" fill="#4a4a4a"/>
<rect id="Rectangle_25" data-name="Rectangle 25" width="2.537" height="2.537" rx="1" transform="translate(40.018 0)" fill="#4a4a4a"/>
</g>
<g id="Group_4" data-name="Group 4" transform="translate(43.283 4.538) rotate(180)">
<path id="Path_53" data-name="Path 53" d="M.519,0H2.956a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.519A.519.519,0,0,1,.519,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<rect id="Rectangle_26" data-name="Rectangle 26" width="2.537" height="2.537" rx="1" transform="translate(3.945 0)" fill="#4a4a4a"/>
<rect id="Rectangle_27" data-name="Rectangle 27" width="2.537" height="2.537" rx="1" transform="translate(6.951 0)" fill="#4a4a4a"/>
<rect id="Rectangle_28" data-name="Rectangle 28" width="2.537" height="2.537" rx="1" transform="translate(9.958 0)" fill="#4a4a4a"/>
<rect id="Rectangle_29" data-name="Rectangle 29" width="2.537" height="2.537" rx="1" transform="translate(12.964 0)" fill="#4a4a4a"/>
<rect id="Rectangle_30" data-name="Rectangle 30" width="2.537" height="2.537" rx="1" transform="translate(15.97 0)" fill="#4a4a4a"/>
<rect id="Rectangle_31" data-name="Rectangle 31" width="2.537" height="2.537" rx="1" transform="translate(18.976 0)" fill="#4a4a4a"/>
<rect id="Rectangle_32" data-name="Rectangle 32" width="2.537" height="2.537" rx="1" transform="translate(21.982 0)" fill="#4a4a4a"/>
<rect id="Rectangle_33" data-name="Rectangle 33" width="2.537" height="2.537" rx="1" transform="translate(24.988 0)" fill="#4a4a4a"/>
<rect id="Rectangle_34" data-name="Rectangle 34" width="2.537" height="2.537" rx="1" transform="translate(27.994 0)" fill="#4a4a4a"/>
<rect id="Rectangle_35" data-name="Rectangle 35" width="2.537" height="2.537" rx="1" transform="translate(31.001 0)" fill="#4a4a4a"/>
<rect id="Rectangle_36" data-name="Rectangle 36" width="2.537" height="2.537" rx="1" transform="translate(34.007 0)" fill="#4a4a4a"/>
<rect id="Rectangle_37" data-name="Rectangle 37" width="2.537" height="2.537" rx="1" transform="translate(37.013 0)" fill="#4a4a4a"/>
<rect id="Rectangle_38" data-name="Rectangle 38" width="2.537" height="2.537" rx="1" transform="translate(40.018 0)" fill="#4a4a4a"/>
<rect id="Rectangle_39" data-name="Rectangle 39" width="2.537" height="2.537" rx="1" transform="translate(3.945 0)" fill="#4a4a4a"/>
<rect id="Rectangle_40" data-name="Rectangle 40" width="2.537" height="2.537" rx="1" transform="translate(6.951 0)" fill="#4a4a4a"/>
<rect id="Rectangle_41" data-name="Rectangle 41" width="2.537" height="2.537" rx="1" transform="translate(9.958 0)" fill="#4a4a4a"/>
<rect id="Rectangle_42" data-name="Rectangle 42" width="2.537" height="2.537" rx="1" transform="translate(12.964 0)" fill="#4a4a4a"/>
<rect id="Rectangle_43" data-name="Rectangle 43" width="2.537" height="2.537" rx="1" transform="translate(15.97 0)" fill="#4a4a4a"/>
<rect id="Rectangle_44" data-name="Rectangle 44" width="2.537" height="2.537" rx="1" transform="translate(18.976 0)" fill="#4a4a4a"/>
<rect id="Rectangle_45" data-name="Rectangle 45" width="2.537" height="2.537" rx="1" transform="translate(21.982 0)" fill="#4a4a4a"/>
<rect id="Rectangle_46" data-name="Rectangle 46" width="2.537" height="2.537" rx="1" transform="translate(24.988 0)" fill="#4a4a4a"/>
<rect id="Rectangle_47" data-name="Rectangle 47" width="2.537" height="2.537" rx="1" transform="translate(27.994 0)" fill="#4a4a4a"/>
<rect id="Rectangle_48" data-name="Rectangle 48" width="2.537" height="2.537" rx="1" transform="translate(31.001 0)" fill="#4a4a4a"/>
<rect id="Rectangle_49" data-name="Rectangle 49" width="2.537" height="2.537" rx="1" transform="translate(34.007 0)" fill="#4a4a4a"/>
<rect id="Rectangle_50" data-name="Rectangle 50" width="2.537" height="2.537" rx="1" transform="translate(37.013 0)" fill="#4a4a4a"/>
<rect id="Rectangle_51" data-name="Rectangle 51" width="2.537" height="2.537" rx="1" transform="translate(40.018 0)" fill="#4a4a4a"/>
</g>
<g id="Group_6" data-name="Group 6" transform="translate(0.728 7.883)">
<path id="Path_54" data-name="Path 54" d="M.519,0h3.47a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.52A.519.519,0,0,1,.519,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<g id="Group_5" data-name="Group 5" transform="translate(5.073 0)">
<rect id="Rectangle_52" data-name="Rectangle 52" width="2.537" height="2.537" rx="1" transform="translate(0 0)" fill="#4a4a4a"/>
<rect id="Rectangle_53" data-name="Rectangle 53" width="2.537" height="2.537" rx="1" transform="translate(3.006 0)" fill="#4a4a4a"/>
<rect id="Rectangle_54" data-name="Rectangle 54" width="2.537" height="2.537" rx="1" transform="translate(6.012 0)" fill="#4a4a4a"/>
<rect id="Rectangle_55" data-name="Rectangle 55" width="2.537" height="2.537" rx="1" transform="translate(9.018 0)" fill="#4a4a4a"/>
<rect id="Rectangle_56" data-name="Rectangle 56" width="2.537" height="2.537" rx="1" transform="translate(12.025 0)" fill="#4a4a4a"/>
<rect id="Rectangle_57" data-name="Rectangle 57" width="2.537" height="2.537" rx="1" transform="translate(15.031 0)" fill="#4a4a4a"/>
<rect id="Rectangle_58" data-name="Rectangle 58" width="2.537" height="2.537" rx="1" transform="translate(18.037 0)" fill="#4a4a4a"/>
<rect id="Rectangle_59" data-name="Rectangle 59" width="2.537" height="2.537" rx="1" transform="translate(21.042 0)" fill="#4a4a4a"/>
<rect id="Rectangle_60" data-name="Rectangle 60" width="2.537" height="2.537" rx="1" transform="translate(24.049 0)" fill="#4a4a4a"/>
<rect id="Rectangle_61" data-name="Rectangle 61" width="2.537" height="2.537" rx="1" transform="translate(27.055 0)" fill="#4a4a4a"/>
<rect id="Rectangle_62" data-name="Rectangle 62" width="2.537" height="2.537" rx="1" transform="translate(30.061 0)" fill="#4a4a4a"/>
</g>
<path id="Path_55" data-name="Path 55" d="M.52,0H3.8a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.52A.519.519,0,0,1,.519,0Z" transform="translate(38.234 0)" fill="#4a4a4a" fill-rule="evenodd"/>
</g>
<g id="Group_7" data-name="Group 7" transform="translate(0.728 14.084)">
<rect id="Rectangle_63" data-name="Rectangle 63" width="2.537" height="2.537" rx="1" transform="translate(0 0)" fill="#4a4a4a"/>
<rect id="Rectangle_64" data-name="Rectangle 64" width="2.537" height="2.537" rx="1" transform="translate(3.006 0)" fill="#4a4a4a"/>
<rect id="Rectangle_65" data-name="Rectangle 65" width="2.537" height="2.537" rx="1" transform="translate(6.012 0)" fill="#4a4a4a"/>
<rect id="Rectangle_66" data-name="Rectangle 66" width="2.537" height="2.537" rx="1" transform="translate(9.018 0)" fill="#4a4a4a"/>
<path id="Path_56" data-name="Path 56" d="M.519,0H14.981A.519.519,0,0,1,15.5.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.018V.519A.519.519,0,0,1,.519,0Zm15.97,0h1.874a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H16.489a.519.519,0,0,1-.519-.519V.519A.519.519,0,0,1,16.489,0Z" transform="translate(12.024 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<rect id="Rectangle_67" data-name="Rectangle 67" width="2.537" height="2.537" rx="1" transform="translate(31.376 0)" fill="#4a4a4a"/>
<rect id="Rectangle_68" data-name="Rectangle 68" width="2.537" height="2.537" rx="1" transform="translate(34.382 0)" fill="#4a4a4a"/>
<rect id="Rectangle_69" data-name="Rectangle 69" width="2.537" height="2.537" rx="1" transform="translate(40.018 0)" fill="#4a4a4a"/>
<path id="Path_57" data-name="Path 57" d="M2.537,0V.561a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,.561V0Z" transform="translate(39.736 1.08) rotate(180)" fill="#4a4a4a"/>
<path id="Path_58" data-name="Path 58" d="M2.537,0V.561a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,.561V0Z" transform="translate(37.2 1.456)" fill="#4a4a4a"/>
</g>
<rect id="Rectangle_70" data-name="Rectangle 70" width="42.273" height="1.127" rx="0.564" transform="translate(0.915 0.556)" fill="#4a4a4a"/>
<rect id="Rectangle_71" data-name="Rectangle 71" width="2.37" height="0.752" rx="0.376" transform="translate(1.949 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_72" data-name="Rectangle 72" width="2.37" height="0.752" rx="0.376" transform="translate(5.193 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_73" data-name="Rectangle 73" width="2.37" height="0.752" rx="0.376" transform="translate(7.688 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_74" data-name="Rectangle 74" width="2.37" height="0.752" rx="0.376" transform="translate(10.183 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_75" data-name="Rectangle 75" width="2.37" height="0.752" rx="0.376" transform="translate(12.679 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_76" data-name="Rectangle 76" width="2.37" height="0.752" rx="0.376" transform="translate(15.797 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_77" data-name="Rectangle 77" width="2.37" height="0.752" rx="0.376" transform="translate(18.292 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_78" data-name="Rectangle 78" width="2.37" height="0.752" rx="0.376" transform="translate(20.788 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_79" data-name="Rectangle 79" width="2.37" height="0.752" rx="0.376" transform="translate(23.283 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_80" data-name="Rectangle 80" width="2.37" height="0.752" rx="0.376" transform="translate(26.402 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_81" data-name="Rectangle 81" width="2.37" height="0.752" rx="0.376" transform="translate(28.897 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_82" data-name="Rectangle 82" width="2.37" height="0.752" rx="0.376" transform="translate(31.393 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_83" data-name="Rectangle 83" width="2.37" height="0.752" rx="0.376" transform="translate(34.512 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_84" data-name="Rectangle 84" width="2.37" height="0.752" rx="0.376" transform="translate(37.007 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_85" data-name="Rectangle 85" width="2.37" height="0.752" rx="0.376" transform="translate(39.502 0.744)" fill="#d8d8d8" opacity="0.136"/>
</g>
<path id="Path_59" data-name="Path 59" d="M123.779,148.389a2.583,2.583,0,0,0-.332.033c-.02-.078-.038-.156-.06-.234a2.594,2.594,0,1,0-2.567-4.455q-.086-.088-.174-.175a2.593,2.593,0,1,0-4.461-2.569c-.077-.022-.154-.04-.231-.06a2.6,2.6,0,1,0-5.128,0c-.077.02-.154.038-.231.06a2.594,2.594,0,1,0-4.461,2.569,10.384,10.384,0,1,0,17.314,9.992,2.592,2.592,0,1,0,.332-5.161" transform="translate(-51.054 -75.262)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_60" data-name="Path 60" d="M83,113.389h20.779V103H83Z" transform="translate(-41.443 -58.444)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_61" data-name="Path 61" d="M123.389,108.944a1.3,1.3,0,1,0,0-2.6,1.338,1.338,0,0,0-.166.017c-.01-.039-.019-.078-.03-.117a1.3,1.3,0,0,0-.5-2.5,1.285,1.285,0,0,0-.783.269q-.043-.044-.087-.087a1.285,1.285,0,0,0,.263-.776,1.3,1.3,0,0,0-2.493-.509,5.195,5.195,0,1,0,0,10,1.3,1.3,0,0,0,2.493-.509,1.285,1.285,0,0,0-.263-.776q.044-.043.087-.087a1.285,1.285,0,0,0,.783.269,1.3,1.3,0,0,0,.5-2.5c.011-.038.02-.078.03-.117a1.335,1.335,0,0,0,.166.017" transform="translate(-55.859 -57.894)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_62" data-name="Path 62" d="M141.8,38.745a1.41,1.41,0,0,1-.255-.026,1.309,1.309,0,0,1-.244-.073,1.349,1.349,0,0,1-.224-.119,1.967,1.967,0,0,1-.2-.161,1.52,1.52,0,0,1-.161-.2,1.282,1.282,0,0,1-.218-.722,1.41,1.41,0,0,1,.026-.255,1.5,1.5,0,0,1,.072-.244,1.364,1.364,0,0,1,.12-.223,1.252,1.252,0,0,1,.358-.358,1.349,1.349,0,0,1,.224-.119,1.309,1.309,0,0,1,.244-.073,1.2,1.2,0,0,1,.509,0,1.262,1.262,0,0,1,.468.192,1.968,1.968,0,0,1,.2.161,1.908,1.908,0,0,1,.161.2,1.322,1.322,0,0,1,.12.223,1.361,1.361,0,0,1,.1.5,1.317,1.317,0,0,1-.379.919,1.968,1.968,0,0,1-.2.161,1.346,1.346,0,0,1-.223.119,1.332,1.332,0,0,1-.5.1m10.389-.649a1.326,1.326,0,0,1-.92-.379,1.979,1.979,0,0,1-.161-.2,1.282,1.282,0,0,1-.218-.722,1.326,1.326,0,0,1,.379-.919,1.967,1.967,0,0,1,.2-.161,1.351,1.351,0,0,1,.224-.119,1.308,1.308,0,0,1,.244-.073,1.2,1.2,0,0,1,.509,0,1.262,1.262,0,0,1,.468.192,1.967,1.967,0,0,1,.2.161,1.326,1.326,0,0,1,.379.919,1.461,1.461,0,0,1-.026.255,1.323,1.323,0,0,1-.073.244,1.847,1.847,0,0,1-.119.223,1.911,1.911,0,0,1-.161.2,1.967,1.967,0,0,1-.2.161,1.294,1.294,0,0,1-.722.218" transform="translate(-69.074 -26.006)" fill-rule="evenodd"/>
</g>
<g id="React-icon" transform="translate(906.3 541.56)">
<path id="Path_330" data-name="Path 330" d="M263.668,117.179c0-5.827-7.3-11.35-18.487-14.775,2.582-11.4,1.434-20.477-3.622-23.382a7.861,7.861,0,0,0-4.016-1v4a4.152,4.152,0,0,1,2.044.466c2.439,1.4,3.5,6.724,2.672,13.574-.2,1.685-.52,3.461-.914,5.272a86.9,86.9,0,0,0-11.386-1.954,87.469,87.469,0,0,0-7.459-8.965c5.845-5.433,11.332-8.41,15.062-8.41V78h0c-4.931,0-11.386,3.514-17.913,9.611-6.527-6.061-12.982-9.539-17.913-9.539v4c3.712,0,9.216,2.959,15.062,8.356a84.687,84.687,0,0,0-7.405,8.947,83.732,83.732,0,0,0-11.4,1.972c-.412-1.793-.717-3.532-.932-5.2-.843-6.85.2-12.175,2.618-13.592a3.991,3.991,0,0,1,2.062-.466v-4h0a8,8,0,0,0-4.052,1c-5.039,2.9-6.168,11.96-3.568,23.328-11.153,3.443-18.415,8.947-18.415,14.757,0,5.828,7.3,11.35,18.487,14.775-2.582,11.4-1.434,20.477,3.622,23.382a7.882,7.882,0,0,0,4.034,1c4.931,0,11.386-3.514,17.913-9.611,6.527,6.061,12.982,9.539,17.913,9.539a8,8,0,0,0,4.052-1c5.039-2.9,6.168-11.96,3.568-23.328C256.406,128.511,263.668,122.988,263.668,117.179Zm-23.346-11.96c-.663,2.313-1.488,4.7-2.421,7.083-.735-1.434-1.506-2.869-2.349-4.3-.825-1.434-1.7-2.833-2.582-4.2C235.517,104.179,237.974,104.645,240.323,105.219Zm-8.212,19.1c-1.4,2.421-2.833,4.716-4.321,6.85-2.672.233-5.379.359-8.1.359-2.708,0-5.415-.126-8.069-.341q-2.232-3.2-4.339-6.814-2.044-3.523-3.73-7.136c1.112-2.4,2.367-4.805,3.712-7.154,1.4-2.421,2.833-4.716,4.321-6.85,2.672-.233,5.379-.359,8.1-.359,2.708,0,5.415.126,8.069.341q2.232,3.2,4.339,6.814,2.044,3.523,3.73,7.136C234.692,119.564,233.455,121.966,232.11,124.315Zm5.792-2.331c.968,2.4,1.793,4.805,2.474,7.136-2.349.574-4.823,1.058-7.387,1.434.879-1.381,1.757-2.8,2.582-4.25C236.4,124.871,237.167,123.419,237.9,121.984ZM219.72,141.116a73.921,73.921,0,0,1-4.985-5.738c1.614.072,3.263.126,4.931.126,1.685,0,3.353-.036,4.985-.126A69.993,69.993,0,0,1,219.72,141.116ZM206.38,130.555c-2.546-.377-5-.843-7.352-1.417.663-2.313,1.488-4.7,2.421-7.083.735,1.434,1.506,2.869,2.349,4.3S205.5,129.192,206.38,130.555ZM219.63,93.241a73.924,73.924,0,0,1,4.985,5.738c-1.614-.072-3.263-.126-4.931-.126-1.686,0-3.353.036-4.985.126A69.993,69.993,0,0,1,219.63,93.241ZM206.362,103.8c-.879,1.381-1.757,2.8-2.582,4.25-.825,1.434-1.6,2.869-2.331,4.3-.968-2.4-1.793-4.805-2.474-7.136C201.323,104.663,203.8,104.179,206.362,103.8Zm-16.227,22.449c-6.348-2.708-10.454-6.258-10.454-9.073s4.106-6.383,10.454-9.073c1.542-.663,3.228-1.255,4.967-1.811a86.122,86.122,0,0,0,4.034,10.92,84.9,84.9,0,0,0-3.981,10.866C193.38,127.525,191.694,126.915,190.134,126.252Zm9.647,25.623c-2.439-1.4-3.5-6.724-2.672-13.574.2-1.686.52-3.461.914-5.272a86.9,86.9,0,0,0,11.386,1.954,87.465,87.465,0,0,0,7.459,8.965c-5.845,5.433-11.332,8.41-15.062,8.41A4.279,4.279,0,0,1,199.781,151.875Zm42.532-13.663c.843,6.85-.2,12.175-2.618,13.592a3.99,3.99,0,0,1-2.062.466c-3.712,0-9.216-2.959-15.062-8.356a84.689,84.689,0,0,0,7.405-8.947,83.731,83.731,0,0,0,11.4-1.972A50.194,50.194,0,0,1,242.313,138.212Zm6.9-11.96c-1.542.663-3.228,1.255-4.967,1.811a86.12,86.12,0,0,0-4.034-10.92,84.9,84.9,0,0,0,3.981-10.866c1.775.556,3.461,1.165,5.039,1.829,6.348,2.708,10.454,6.258,10.454,9.073C259.67,119.994,255.564,123.562,249.216,126.252Z" fill="#61dafb"/>
<path id="Path_331" data-name="Path 331" d="M320.8,78.4Z" transform="translate(-119.082 -0.328)" fill="#61dafb"/>
<circle id="Ellipse_112" data-name="Ellipse 112" cx="8.194" cy="8.194" r="8.194" transform="translate(211.472 108.984)" fill="#61dafb"/>
<path id="Path_332" data-name="Path 332" d="M520.5,78.1Z" transform="translate(-282.975 -0.082)" fill="#61dafb"/>
</g>
</g>
</svg>
40
images/undraw_docusaurus_tree.svg
Normal file
@ -0,0 +1,40 @@
<svg xmlns="http://www.w3.org/2000/svg" width="1129" height="663" viewBox="0 0 1129 663">
<title>Focus on What Matters</title>
<circle cx="321" cy="321" r="321" fill="#f2f2f2" />
<ellipse cx="559" cy="635.49998" rx="514" ry="27.50002" fill="#3f3d56" />
<ellipse cx="558" cy="627" rx="460" ry="22" opacity="0.2" />
<rect x="131" y="152.5" width="840" height="50" fill="#3f3d56" />
<path d="M166.5,727.3299A21.67009,21.67009,0,0,0,188.1701,749H984.8299A21.67009,21.67009,0,0,0,1006.5,727.3299V296h-840Z" transform="translate(-35.5 -118.5)" fill="#3f3d56" />
<path d="M984.8299,236H188.1701A21.67009,21.67009,0,0,0,166.5,257.6701V296h840V257.6701A21.67009,21.67009,0,0,0,984.8299,236Z" transform="translate(-35.5 -118.5)" fill="#3f3d56" />
<path d="M984.8299,236H188.1701A21.67009,21.67009,0,0,0,166.5,257.6701V296h840V257.6701A21.67009,21.67009,0,0,0,984.8299,236Z" transform="translate(-35.5 -118.5)" opacity="0.2" />
<circle cx="181" cy="147.5" r="13" fill="#3f3d56" />
<circle cx="217" cy="147.5" r="13" fill="#3f3d56" />
<circle cx="253" cy="147.5" r="13" fill="#3f3d56" />
<rect x="168" y="213.5" width="337" height="386" rx="5.33505" fill="#606060" />
<rect x="603" y="272.5" width="284" height="22" rx="5.47638" fill="#2e8555" />
<rect x="537" y="352.5" width="416" height="15" rx="5.47638" fill="#2e8555" />
<rect x="537" y="396.5" width="416" height="15" rx="5.47638" fill="#2e8555" />
<rect x="537" y="440.5" width="416" height="15" rx="5.47638" fill="#2e8555" />
<rect x="537" y="484.5" width="416" height="15" rx="5.47638" fill="#2e8555" />
<rect x="865" y="552.5" width="88" height="26" rx="7.02756" fill="#3ecc5f" />
<path d="M1088.60287,624.61594a30.11371,30.11371,0,0,0,3.98291-15.266c0-13.79652-8.54358-24.98081-19.08256-24.98081s-19.08256,11.18429-19.08256,24.98081a30.11411,30.11411,0,0,0,3.98291,15.266,31.248,31.248,0,0,0,0,30.53213,31.248,31.248,0,0,0,0,30.53208,31.248,31.248,0,0,0,0,30.53208,30.11408,30.11408,0,0,0-3.98291,15.266c0,13.79652,8.54353,24.98081,19.08256,24.98081s19.08256-11.18429,19.08256-24.98081a30.11368,30.11368,0,0,0-3.98291-15.266,31.248,31.248,0,0,0,0-30.53208,31.248,31.248,0,0,0,0-30.53208,31.248,31.248,0,0,0,0-30.53213Z" transform="translate(-35.5 -118.5)" fill="#3f3d56" />
<ellipse cx="1038.00321" cy="460.31783" rx="19.08256" ry="24.9808" fill="#3f3d56" />
<ellipse cx="1038.00321" cy="429.78574" rx="19.08256" ry="24.9808" fill="#3f3d56" />
<path d="M1144.93871,339.34489a91.61081,91.61081,0,0,0,7.10658-10.46092l-50.141-8.23491,54.22885.4033a91.566,91.566,0,0,0,1.74556-72.42605l-72.75449,37.74139,67.09658-49.32086a91.41255,91.41255,0,1,0-150.971,102.29805,91.45842,91.45842,0,0,0-10.42451,16.66946l65.0866,33.81447-69.40046-23.292a91.46011,91.46011,0,0,0,14.73837,85.83669,91.40575,91.40575,0,1,0,143.68892,0,91.41808,91.41808,0,0,0,0-113.02862Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M981.6885,395.8592a91.01343,91.01343,0,0,0,19.56129,56.51431,91.40575,91.40575,0,1,0,143.68892,0C1157.18982,436.82067,981.6885,385.60008,981.6885,395.8592Z" transform="translate(-35.5 -118.5)" opacity="0.1" />
<path d="M365.62,461.43628H477.094v45.12043H365.62Z" transform="translate(-35.5 -118.5)" fill="#fff" fill-rule="evenodd" />
<path d="M264.76252,608.74122a26.50931,26.50931,0,0,1-22.96231-13.27072,26.50976,26.50976,0,0,0,22.96231,39.81215H291.304V608.74122Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M384.17242,468.57061l92.92155-5.80726V449.49263a26.54091,26.54091,0,0,0-26.54143-26.54143H331.1161l-3.31768-5.74622a3.83043,3.83043,0,0,0-6.63536,0l-3.31768,5.74622-3.31767-5.74622a3.83043,3.83043,0,0,0-6.63536,0l-3.31768,5.74622L301.257,417.205a3.83043,3.83043,0,0,0-6.63536,0L291.304,422.9512c-.02919,0-.05573.004-.08625.004l-5.49674-5.49541a3.8293,3.8293,0,0,0-6.4071,1.71723l-1.81676,6.77338L270.607,424.1031a3.82993,3.82993,0,0,0-4.6912,4.69253l1.84463,6.89148-6.77072,1.81411a3.8315,3.8315,0,0,0-1.71988,6.40975l5.49673,5.49673c0,.02787-.004.05574-.004.08493l-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74621,3.31768L259.0163,466.081a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31767a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31767a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31768a3.83042,3.83042,0,0,0,0,6.63535l5.74622,3.31768-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768L259.0163,558.976a3.83042,3.83042,0,0,0,0,6.63535l5.74622,3.31768-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31768a3.83042,3.83042,0,0,0,0,6.63535l5.74622,3.31768-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768A26.54091,26.54091,0,0,0,291.304,635.28265H450.55254A26.5409,26.5409,0,0,0,477.094,608.74122V502.5755l-92.92155-5.80727a14.12639,14.12639,0,0,1,0-28.19762" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M424.01111,635.28265h39.81214V582.19979H424.01111Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M490.36468,602.10586a6.60242,6.60242,0,0,0-.848.08493c-.05042-.19906-.09821-.39945-.15393-.59852A6.62668,6.62668,0,1,0,482.80568,590.21q-.2203-.22491-.44457-.44589a6.62391,6.62391,0,1,0-11.39689-6.56369c-.1964-.05575-.39414-.10218-.59056-.15262a6.63957,6.63957,0,1,0-13.10086,0c-.1964.05042-.39414.09687-.59056.15262a6.62767,6.62767,0,1,0-11.39688,6.56369,26.52754,26.52754,0,1,0,44.23127,25.52756,6.6211,6.6211,0,1,0,.848-13.18579" transform="translate(-35.5 -118.5)" fill="#44d860" fill-rule="evenodd" />
<path d="M437.28182,555.65836H477.094V529.11693H437.28182Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M490.36468,545.70532a3.31768,3.31768,0,0,0,0-6.63536,3.41133,3.41133,0,0,0-.42333.04247c-.02655-.09953-.04911-.19907-.077-.29859a3.319,3.319,0,0,0-1.278-6.37923,3.28174,3.28174,0,0,0-2.00122.68742q-.10947-.11346-.22294-.22295a3.282,3.282,0,0,0,.67149-1.98265,3.31768,3.31768,0,0,0-6.37-1.2992,13.27078,13.27078,0,1,0,0,25.54082,3.31768,3.31768,0,0,0,6.37-1.2992,3.282,3.282,0,0,0-.67149-1.98265q.11347-.10947.22294-.22294a3.28174,3.28174,0,0,0,2.00122.68742,3.31768,3.31768,0,0,0,1.278-6.37923c.02786-.0982.05042-.19907.077-.29859a3.41325,3.41325,0,0,0,.42333.04246" transform="translate(-35.5 -118.5)" fill="#44d860" fill-rule="evenodd" />
<path d="M317.84538,466.081a3.31768,3.31768,0,0,1-3.31767-3.31768,9.953,9.953,0,1,0-19.90608,0,3.31768,3.31768,0,1,1-6.63535,0,16.58839,16.58839,0,1,1,33.17678,0,3.31768,3.31768,0,0,1-3.31768,3.31768" transform="translate(-35.5 -118.5)" fill-rule="evenodd" />
<path d="M370.92825,635.28265h79.62429A26.5409,26.5409,0,0,0,477.094,608.74122v-92.895H397.46968a26.54091,26.54091,0,0,0-26.54143,26.54143Z" transform="translate(-35.5 -118.5)" fill="#ffff50" fill-rule="evenodd" />
<path d="M457.21444,556.98543H390.80778a1.32707,1.32707,0,0,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414m0,26.54143H390.80778a1.32707,1.32707,0,1,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414m0,26.54143H390.80778a1.32707,1.32707,0,1,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414m0-66.10674H390.80778a1.32707,1.32707,0,0,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414m0,26.29459H390.80778a1.32707,1.32707,0,0,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414m0,26.54143H390.80778a1.32707,1.32707,0,0,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414M477.094,474.19076c-.01592,0-.0292-.008-.04512-.00663-4.10064.13934-6.04083,4.24132-7.75274,7.86024-1.78623,3.78215-3.16771,6.24122-5.43171,6.16691-2.50685-.09024-3.94007-2.92222-5.45825-5.91874-1.74377-3.44243-3.73438-7.34667-7.91333-7.20069-4.04227.138-5.98907,3.70784-7.70631,6.857-1.82738,3.35484-3.07084,5.39455-5.46887,5.30033-2.55727-.09289-3.91619-2.39536-5.48877-5.06013-1.75306-2.96733-3.77951-6.30359-7.8775-6.18946-3.97326.13669-5.92537,3.16507-7.64791,5.83912-1.82207,2.82666-3.09872,4.5492-5.52725,4.447-2.61832-.09289-3.9706-2.00388-5.53522-4.21611-1.757-2.4856-3.737-5.299-7.82308-5.16231-3.88567.13271-5.83779,2.61434-7.559,4.80135-1.635,2.07555-2.9116,3.71846-5.61218,3.615a1.32793,1.32793,0,1,0-.09555,2.65414c4.00377.134,6.03154-2.38873,7.79257-4.6275,1.562-1.9853,2.91027-3.69855,5.56441-3.78879,2.55594-.10882,3.75429,1.47968,5.56707,4.04093,1.7212,2.43385,3.67465,5.19416,7.60545,5.33616,4.11789.138,6.09921-2.93946,7.8536-5.66261,1.56861-2.43385,2.92221-4.53461,5.50734-4.62352,2.37944-.08892,3.67466,1.79154,5.50072,4.885,1.72121,2.91557,3.67069,6.21865,7.67977,6.36463,4.14709.14332,6.14965-3.47693,7.89475-6.68181,1.51155-2.77092,2.93814-5.38791,5.46621-5.4755,2.37944-.05573,3.62025,2.11668,5.45558,5.74622,1.71459,3.388,3.65875,7.22591,7.73019,7.37321l.22429.004c4.06614,0,5.99571-4.08074,7.70364-7.68905,1.51154-3.19825,2.94211-6.21069,5.3972-6.33411Z" transform="translate(-35.5 -118.5)" fill-rule="evenodd" />
<path d="M344.38682,635.28265h53.08286V582.19979H344.38682Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M424.01111,602.10586a6.60242,6.60242,0,0,0-.848.08493c-.05042-.19906-.09821-.39945-.15394-.59852A6.62667,6.62667,0,1,0,416.45211,590.21q-.2203-.22491-.44458-.44589a6.62391,6.62391,0,1,0-11.39689-6.56369c-.1964-.05575-.39413-.10218-.59054-.15262a6.63957,6.63957,0,1,0-13.10084,0c-.19641.05042-.39414.09687-.59055.15262a6.62767,6.62767,0,1,0-11.39689,6.56369,26.52755,26.52755,0,1,0,44.2313,25.52756,6.6211,6.6211,0,1,0,.848-13.18579" transform="translate(-35.5 -118.5)" fill="#44d860" fill-rule="evenodd" />
<path d="M344.38682,555.65836h53.08286V529.11693H344.38682Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M410.74039,545.70532a3.31768,3.31768,0,1,0,0-6.63536,3.41133,3.41133,0,0,0-.42333.04247c-.02655-.09953-.04911-.19907-.077-.29859a3.319,3.319,0,0,0-1.278-6.37923,3.28174,3.28174,0,0,0-2.00122.68742q-.10947-.11346-.22294-.22295a3.282,3.282,0,0,0,.67149-1.98265,3.31768,3.31768,0,0,0-6.37-1.2992,13.27078,13.27078,0,1,0,0,25.54082,3.31768,3.31768,0,0,0,6.37-1.2992,3.282,3.282,0,0,0-.67149-1.98265q.11347-.10947.22294-.22294a3.28174,3.28174,0,0,0,2.00122.68742,3.31768,3.31768,0,0,0,1.278-6.37923c.02786-.0982.05042-.19907.077-.29859a3.41325,3.41325,0,0,0,.42333.04246" transform="translate(-35.5 -118.5)" fill="#44d860" fill-rule="evenodd" />
<path d="M424.01111,447.8338a3.60349,3.60349,0,0,1-.65028-.06636,3.34415,3.34415,0,0,1-.62372-.18579,3.44679,3.44679,0,0,1-.572-.30522,5.02708,5.02708,0,0,1-.50429-.4114,3.88726,3.88726,0,0,1-.41007-.50428,3.27532,3.27532,0,0,1-.55737-1.84463,3.60248,3.60248,0,0,1,.06636-.65027,3.82638,3.82638,0,0,1,.18447-.62373,3.48858,3.48858,0,0,1,.30656-.57064,3.197,3.197,0,0,1,.91436-.91568,3.44685,3.44685,0,0,1,.572-.30523,3.344,3.344,0,0,1,.62372-.18578,3.06907,3.06907,0,0,1,1.30053,0,3.22332,3.22332,0,0,1,1.19436.491,5.02835,5.02835,0,0,1,.50429.41139,4.8801,4.8801,0,0,1,.41139.50429,3.38246,3.38246,0,0,1,.30522.57064,3.47806,3.47806,0,0,1,.25215,1.274A3.36394,3.36394,0,0,1,426.36,446.865a5.02708,5.02708,0,0,1-.50429.4114,3.3057,3.3057,0,0,1-1.84463.55737m26.54143-1.65884a3.38754,3.38754,0,0,1-2.35024-.96877,5.04185,5.04185,0,0,1-.41007-.50428,3.27532,3.27532,0,0,1-.55737-1.84463,3.38659,3.38659,0,0,1,.96744-2.34892,5.02559,5.02559,0,0,1,.50429-.41139,3.44685,3.44685,0,0,1,.572-.30523,3.3432,3.3432,0,0,1,.62373-.18579,3.06952,3.06952,0,0,1,1.30052,0,3.22356,3.22356,0,0,1,1.19436.491,5.02559,5.02559,0,0,1,.50429.41139,3.38792,3.38792,0,0,1,.96876,2.34892,3.72635,3.72635,0,0,1-.06636.65026,3.37387,3.37387,0,0,1-.18579.62373,4.71469,4.71469,0,0,1-.30522.57064,4.8801,4.8801,0,0,1-.41139.50429,5.02559,5.02559,0,0,1-.50429.41139,3.30547,3.30547,0,0,1-1.84463.55737" transform="translate(-35.5 -118.5)" fill-rule="evenodd" />
|
||||
</svg>
|
After Width: | Height: | Size: 12 KiB |
28
installer/create_installers.sh
Normal file
@ -0,0 +1,28 @@
#!/bin/bash

# For developers only! Not for users!
# This creates the installer zip files that will be distributed to users
# It packs install.{sh,bat} along with a readme, and ensures that the user
# has the install script inside a new empty folder (after unzipping),
# otherwise the git repo will extract into whatever folder the script is in.

cd "$(dirname "${BASH_SOURCE[0]}")"

# make the installer zip for linux and mac
rm -rf sygil
mkdir -p sygil
cp install.sh sygil
cp readme.txt sygil

zip -r sygil-linux.zip sygil
zip -r sygil-mac.zip sygil

# make the installer zip for windows
rm -rf sygil
mkdir -p sygil
cp install.bat sygil
cp readme.txt sygil

zip -r sygil-windows.zip sygil

echo "The installer zips are ready to be distributed.."
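For context, a hypothetical invocation (not part of the diff; the installer/ path comes from the file header above):

    $ bash installer/create_installers.sh
    # emits sygil-linux.zip, sygil-mac.zip and sygil-windows.zip next to the script,
    # each containing only install.{sh,bat} plus readme.txt inside a fresh `sygil` folder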
96
installer/install.bat
Normal file
@ -0,0 +1,96 @@
@echo off

@rem This script will install git and conda (if not found on the PATH variable)
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
@rem For users who already have git and conda, this step will be skipped.

@rem Then, it'll run the webui.cmd file to continue with the installation as usual.

@rem This enables a user to install this project without manually installing conda and git.

echo "Installing Sygil WebUI.."
echo.

@rem config
set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba
set INSTALL_ENV_DIR=%cd%\installer_files\env
set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
set REPO_URL=https://github.com/Sygil-Dev/sygil-webui.git
@rem Change the download URL to Sygil repo's release URL
@rem We need to mirror micromamba.exe, because the official download URL uses tar.bz2 compression
@rem which Windows can't unzip natively.
@rem https://mamba.readthedocs.io/en/latest/installation.html#windows
set umamba_exists=F

@rem figure out whether git and conda need to be installed
if exist "%INSTALL_ENV_DIR%" set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%

set PACKAGES_TO_INSTALL=

call conda --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% conda

call git --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git

call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" EQU "0" set umamba_exists=T

@rem (if necessary) install git and conda into a contained environment
if "%PACKAGES_TO_INSTALL%" NEQ "" (
    @rem download micromamba
    if "%umamba_exists%" == "F" (
        echo "Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to %MAMBA_ROOT_PREFIX%\micromamba.exe"

        mkdir "%MAMBA_ROOT_PREFIX%"
        call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe"

        @rem test the mamba binary
        echo Micromamba version:
        call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version
    )

    @rem create the installer env
    if not exist "%INSTALL_ENV_DIR%" (
        call "%MAMBA_ROOT_PREFIX%\micromamba.exe" create -y --prefix "%INSTALL_ENV_DIR%"
    )

    echo "Packages to install:%PACKAGES_TO_INSTALL%"

    call "%MAMBA_ROOT_PREFIX%\micromamba.exe" install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%

    if not exist "%INSTALL_ENV_DIR%" (
        echo "There was a problem while installing%PACKAGES_TO_INSTALL% using micromamba. Cannot continue."
        pause
        exit /b
    )
)

set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%

@rem get the repo (and load into the current directory)
if not exist ".git" (
    call git config --global init.defaultBranch master
    call git init
    call git remote add origin %REPO_URL%
    call git fetch
    call git checkout origin/master -ft
)

@rem activate the base env
call conda activate

@rem make the models dir
mkdir models\ldm\stable-diffusion-v1

@rem install the project
call webui.cmd

@rem finally, tell the user that they need to download the ckpt
echo.
echo "Now you need to install the weights for the stable diffusion model."
echo "Please follow the steps related to models weights at https://sd-webui.github.io/stable-diffusion-webui/docs/1.windows-installation.html#cloning-the-repo to complete the installation"

@rem it would be nice if the weights downloaded automatically, and didn't need the user to do this manually.

pause
90
installer/install.sh
Executable file
@ -0,0 +1,90 @@
#!/bin/bash

# This script will install git and conda (if not found on the PATH variable)
# using micromamba (an 8mb static-linked single-file binary, conda replacement).
# For users who already have git and conda, this step will be skipped.

# Then, it'll run the webui.cmd file to continue with the installation as usual.

# This enables a user to install this project without manually installing conda and git.

cd "$(dirname "${BASH_SOURCE[0]}")"

echo "Installing Sygil WebUI.."
echo ""

OS_ARCH=$(uname -m)
case "${OS_ARCH}" in
    x86_64*) OS_ARCH="64";;
    arm64*) OS_ARCH="aarch64";;
    *) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit
esac

# config
export MAMBA_ROOT_PREFIX="$(pwd)/installer_files/mamba"
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/linux-${OS_ARCH}/latest"
umamba_exists="F"

# figure out whether git and conda need to be installed
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi

PACKAGES_TO_INSTALL=""

if ! hash "conda" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL conda"; fi
if ! hash "git" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi

if "$MAMBA_ROOT_PREFIX/micromamba" --version &>/dev/null; then umamba_exists="T"; fi

# (if necessary) install git and conda into a contained environment
if [ "$PACKAGES_TO_INSTALL" != "" ]; then
    # download micromamba
    if [ "$umamba_exists" == "F" ]; then
        echo "Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to $MAMBA_ROOT_PREFIX/micromamba"

        mkdir -p "$MAMBA_ROOT_PREFIX"
        curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvj bin/micromamba -O > "$MAMBA_ROOT_PREFIX/micromamba"

        chmod u+x "$MAMBA_ROOT_PREFIX/micromamba"

        # test the mamba binary
        echo "Micromamba version:"
        "$MAMBA_ROOT_PREFIX/micromamba" --version
    fi

    # create the installer env
    if [ ! -e "$INSTALL_ENV_DIR" ]; then
        "$MAMBA_ROOT_PREFIX/micromamba" create -y --prefix "$INSTALL_ENV_DIR"
    fi

    echo "Packages to install:$PACKAGES_TO_INSTALL"

    "$MAMBA_ROOT_PREFIX/micromamba" install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge $PACKAGES_TO_INSTALL

    if [ ! -e "$INSTALL_ENV_DIR" ]; then
        echo "There was a problem while initializing micromamba. Cannot continue."
        exit
    fi
fi

if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi

CONDA_BASEPATH=$(conda info --base)
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)

conda activate

# run the installer script for linux
curl "https://raw.githubusercontent.com/JoshuaKimsey/Linux-StableDiffusion-Script/main/linux-sd.sh" > linux-sd.sh
chmod u+x linux-sd.sh

./linux-sd.sh

# tell the user that they need to download the ckpt
WEIGHTS_DOC_URL="https://sd-webui.github.io/stable-diffusion-webui/docs/2.linux-installation.html#initial-start-guide"

echo ""
echo "Now you need to install the weights for the stable diffusion model."
echo "Please follow the steps at $WEIGHTS_DOC_URL to complete the installation"

# it would be nice if the weights downloaded automatically, and didn't need the user to do this manually.
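One detail worth calling out in the script above: micromamba ships as a bzip2 tarball, so the download is streamed straight through tar and only a single member is extracted to stdout. Annotated for clarity (same command as in the script; the comments are editorial):

    # stream the archive, extract only bin/micromamba, and write it to a file (-O sends the member to stdout)
    curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvj bin/micromamba -O > "$MAMBA_ROOT_PREFIX/micromamba"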
45
package.json
Normal file
@ -0,0 +1,45 @@
{
  "name": "docs",
  "version": "0.0.0",
  "private": true,
  "scripts": {
    "docusaurus": "docusaurus",
    "start": "docusaurus start",
    "build": "docusaurus build",
    "swizzle": "docusaurus swizzle",
    "deploy": "docusaurus deploy",
    "clear": "docusaurus clear",
    "serve": "docusaurus serve",
    "write-translations": "docusaurus write-translations",
    "write-heading-ids": "docusaurus write-heading-ids"
  },
  "dependencies": {
    "@cmfcmf/docusaurus-search-local": "^0.11.0",
    "@docusaurus/core": "2.2.0",
    "@docusaurus/preset-classic": "2.2.0",
    "@mdx-js/react": "^1.6.22",
    "clsx": "^1.2.1",
    "create-docusaurus": "^2.2.0",
    "prism-react-renderer": "^1.3.5",
    "react": "^17.0.2",
    "react-dom": "^17.0.2"
  },
  "devDependencies": {
    "@docusaurus/module-type-aliases": "2.2.0"
  },
  "browserslist": {
    "production": [
      ">0.5%",
      "not dead",
      "not op_mini all"
    ],
    "development": [
      "last 1 chrome version",
      "last 1 firefox version",
      "last 1 safari version"
    ]
  },
  "engines": {
    "node": ">=16.14"
  }
}
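With this manifest in place, the local docs workflow is the standard Docusaurus one (a sketch; the commands come straight from the scripts field above, and Node >=16.14 is required by engines):

    yarn install   # resolve dependencies
    yarn start     # dev server with hot reload
    yarn build     # static site written to ./build, ready to publish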
@ -25,12 +25,15 @@ Jinja2==3.1.2 # Jinja2 is required by Gradio
gradio==3.4.1

# Environment Dependencies for WebUI (streamlit)
streamlit==1.13.0
streamlit==1.14.0
streamlit-on-Hover-tabs==1.0.1
streamlit-option-menu==0.3.2
streamlit_nested_layout==0.1.1
streamlit-server-state==0.14.2
streamlit-tensorboard==0.0.2
streamlit-elements==0.1.*  # used for the draggable dashboard and new UI design (WIP)
streamlit-ace==0.1.1  # used to replace the text area on the prompt and also for the code editor tool.
#streamlit-base-extras  # used for logging, thread spawning, thread locking and page routing. For now we are using a modified local version which we have to change to use the proper version from pypi.
hydralit==1.0.14
hydralit_components==1.0.10
stqdm==0.0.4
@ -41,6 +44,8 @@ matplotlib==3.6.
resize-right==0.0.2
torchdiffeq==0.2.3

# Environment Dependencies for WebUI (flet)

# txt2vid
diffusers==0.6.0
librosa==0.9.2
@ -15,14 +15,12 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# base webui import and utils.
#from sd_utils import *
from sd_utils import *
from sd_utils import st
# streamlit imports

#streamlit components section

#other imports
import os, time, requests
import sys
#from fastapi import FastAPI
#import uvicorn

@ -14,11 +14,12 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# base webui import and utils.
from sd_utils import *
from sd_utils import st, logger
# streamlit imports


#other imports
import os, requests
from requests.auth import HTTPBasicAuth
from requests import HTTPError
from stqdm import stqdm

@ -14,16 +14,18 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# base webui import and utils.
from sd_utils import *
from sd_utils import st, custom_models_available, logger, human_readable_size

# streamlit imports

# streamlit components section
import streamlit_nested_layout
from streamlit_server_state import server_state, server_state_lock
from streamlit_server_state import server_state

# other imports
from omegaconf import OmegaConf
import torch
import os, toml

# end of imports
# ---------------------------------------------------------------------------------------------------------------
@ -50,7 +52,7 @@ def layout():

    with col1:
        st.title("General")
        st.session_state['defaults'].general.gpu = int(st.selectbox("GPU", device_list,
        st.session_state['defaults'].general.gpu = int(st.selectbox("GPU", device_list, index=st.session_state['defaults'].general.gpu,
                                                                    help=f"Select which GPU to use. Default: {device_list[0]}").split(":")[0])

        st.session_state['defaults'].general.outdir = str(st.text_input("Output directory", value=st.session_state['defaults'].general.outdir,
@ -208,15 +210,65 @@ def layout():
    with col4:
        st.title("Streamlit Config")

        st.session_state["defaults"].general.streamlit_telemetry = st.checkbox("Enable Telemetry", value=st.session_state['defaults'].general.streamlit_telemetry,
                                                                               help="Enables or Disables streamlit telemetry. Default: False")
        st.session_state["streamlit_config"]["browser"]["gatherUsageStats"] = st.session_state["defaults"].general.streamlit_telemetry

        default_theme_list = ["light", "dark"]
        st.session_state["defaults"].general.default_theme = st.selectbox("Default Theme", default_theme_list, index=default_theme_list.index(st.session_state['defaults'].general.default_theme),
                                                                          help="Default theme to use as base for streamlit. Default: dark")
        st.session_state["streamlit_config"]["theme"]["base"] = st.session_state["defaults"].general.default_theme


        if not st.session_state['defaults'].admin.hide_server_setting:
            with st.expander("Server", True):
                st.session_state["streamlit_config"]['server']['headless'] = st.checkbox("Run Headless", help="If false, will attempt to open a browser window on start. \
                                                                                         Default: false unless (1) we are on a Linux box where DISPLAY is unset, \
                                                                                         or (2) we are running in the Streamlit Atom plugin.")

                st.session_state["streamlit_config"]['server']['port'] = st.number_input("Port", value=st.session_state["streamlit_config"]['server']['port'],
                                                                                         help="The port where the server will listen for browser connections. Default: 8501")

                st.session_state["streamlit_config"]['server']['baseUrlPath'] = st.text_input("Base Url Path", value=st.session_state["streamlit_config"]['server']['baseUrlPath'],
                                                                                              help="The base path for the URL where Streamlit should be served from. Default: '' ")

                st.session_state["streamlit_config"]['server']['enableCORS'] = st.checkbox("Enable CORS", value=st.session_state['streamlit_config']['server']['enableCORS'],
                                                                                           help="Enables support for Cross-Origin Request Sharing (CORS) protection, for added security. \
                                                                                           Due to conflicts between CORS and XSRF, if `server.enableXsrfProtection` is on and `server.enableCORS` \
                                                                                           is off at the same time, we will prioritize `server.enableXsrfProtection`. Default: true")

                st.session_state["streamlit_config"]['server']['enableXsrfProtection'] = st.checkbox("Enable Xsrf Protection",
                                                                                                     value=st.session_state['streamlit_config']['server']['enableXsrfProtection'],
                                                                                                     help="Enables support for Cross-Site Request Forgery (XSRF) protection, \
                                                                                                     for added security. Due to conflicts between CORS and XSRF, \
                                                                                                     if `server.enableXsrfProtection` is on and `server.enableCORS` is off at \
                                                                                                     the same time, we will prioritize `server.enableXsrfProtection`. Default: true")

                st.session_state["streamlit_config"]['server']['maxUploadSize'] = st.number_input("Max Upload Size", value=st.session_state["streamlit_config"]['server']['maxUploadSize'],
                                                                                                  help="Max size, in megabytes, for files uploaded with the file_uploader. Default: 200")

                st.session_state["streamlit_config"]['server']['maxMessageSize'] = st.number_input("Max Message Size", value=st.session_state["streamlit_config"]['server']['maxMessageSize'],
                                                                                                   help="Max size, in megabytes, of messages that can be sent via the WebSocket connection. Default: 200")

                st.session_state["streamlit_config"]['server']['enableWebsocketCompression'] = st.checkbox("Enable Websocket Compression",
                                                                                                           value=st.session_state["streamlit_config"]['server']['enableWebsocketCompression'],
                                                                                                           help="Enables support for websocket compression. Default: false")
        if not st.session_state['defaults'].admin.hide_browser_setting:
            with st.expander("Browser", expanded=True):
                st.session_state["streamlit_config"]['browser']['serverAddress'] = st.text_input("Server Address",
                                                                                                 value=st.session_state["streamlit_config"]['browser']['serverAddress'] if "serverAddress" in st.session_state["streamlit_config"] else "localhost",
                                                                                                 help="Internet address where users should point their browsers in order \
                                                                                                 to connect to the app. Can be IP address or DNS name and path. \
                                                                                                 This is used to: - Set the correct URL for CORS and XSRF protection purposes. \
                                                                                                 - Show the URL on the terminal - Open the browser. Default: 'localhost'")

                st.session_state["defaults"].general.streamlit_telemetry = st.checkbox("Enable Telemetry", value=st.session_state['defaults'].general.streamlit_telemetry,
                                                                                       help="Enables or Disables streamlit telemetry. Default: False")
                st.session_state["streamlit_config"]["browser"]["gatherUsageStats"] = st.session_state["defaults"].general.streamlit_telemetry

                st.session_state["streamlit_config"]['browser']['serverPort'] = st.number_input("Server Port", value=st.session_state["streamlit_config"]['browser']['serverPort'],
                                                                                                help="Port where users should point their browsers in order to connect to the app. \
                                                                                                This is used to: - Set the correct URL for CORS and XSRF protection purposes. \
                                                                                                - Show the URL on the terminal - Open the browser \
                                                                                                Default: whatever value is set in server.port.")

    with col5:
        st.title("Huggingface")
        st.session_state["defaults"].general.huggingface_token = st.text_input("Huggingface Token", value=st.session_state['defaults'].general.huggingface_token, type="password",
@ -225,6 +277,15 @@ def layout():
                                                                                and WILL NOT be shared with us or anyone. You can get your access token \
                                                                                at https://huggingface.co/settings/tokens. Default: None")

        st.title("Stable Horde")
        st.session_state["defaults"].general.stable_horde_api = st.text_input("Stable Horde Api", value=st.session_state["defaults"].general.stable_horde_api, type="password",
                                                                              help="First Register an account at https://stablehorde.net/register which will generate for you \
                                                                              an API key. Store that key somewhere safe. \n \
                                                                              If you do not want to register, you can use `0000000000` as api_key to connect anonymously. \
                                                                              However anonymous accounts have the lowest priority when there are too many concurrent requests! \
                                                                              To increase your priority you will need a unique API key and then to increase your Kudos; \
                                                                              read more about them at https://dbzer0.com/blog/the-kudos-based-economy-for-the-koboldai-horde/.")

    with txt2img_tab:
        col1, col2, col3, col4, col5 = st.columns(5, gap='medium')

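The hunk above only writes the chosen values into st.session_state["streamlit_config"]; persisting them happens outside this hunk. A minimal sketch of what that step could look like, assuming the collected dict is saved to Streamlit's config file (the module does import toml, but save_streamlit_config here is a hypothetical helper, not code from this PR):

    import toml

    def save_streamlit_config(config: dict, path: str = ".streamlit/config.toml") -> None:
        # hypothetical helper: dump the nested settings dict (server/browser/theme sections)
        # into Streamlit's TOML config format
        with open(path, "w") as f:
            toml.dump(config, f)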
91
scripts/barfi_baklavajs.py
Normal file
@ -0,0 +1,91 @@
# This file is part of sygil-webui (https://github.com/Sygil-Dev/sandbox-webui/).

# Copyright 2022 Sygil-Dev team.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# base webui import and utils.
#from sd_utils import *
from sd_utils import st
# streamlit imports

#streamlit components section

#other imports
from barfi import st_barfi, barfi_schemas, Block

# Temp imports

# end of imports
#---------------------------------------------------------------------------------------------------------------


def layout():
    #st.info("Under Construction. :construction_worker:")

    #from barfi import st_barfi, Block

    #add = Block(name='Addition')
    #sub = Block(name='Subtraction')
    #mul = Block(name='Multiplication')
    #div = Block(name='Division')

    #barfi_result = st_barfi(base_blocks= [add, sub, mul, div])
    # or if you want to use a category to organise them in the frontend sub-menu
    #barfi_result = st_barfi(base_blocks= {'Op 1': [add, sub], 'Op 2': [mul, div]})

    col1, col2, col3 = st.columns([1, 8, 1])

    with col2:
        feed = Block(name='Feed')
        feed.add_output()
        def feed_func(self):
            self.set_interface(name='Output 1', value=4)
        feed.add_compute(feed_func)

        splitter = Block(name='Splitter')
        splitter.add_input()
        splitter.add_output()
        splitter.add_output()
        def splitter_func(self):
            in_1 = self.get_interface(name='Input 1')
            value = (in_1/2)
            self.set_interface(name='Output 1', value=value)
            self.set_interface(name='Output 2', value=value)
        splitter.add_compute(splitter_func)

        mixer = Block(name='Mixer')
        mixer.add_input()
        mixer.add_input()
        mixer.add_output()
        def mixer_func(self):
            in_1 = self.get_interface(name='Input 1')
            in_2 = self.get_interface(name='Input 2')
            value = (in_1 + in_2)
            self.set_interface(name='Output 1', value=value)
        mixer.add_compute(mixer_func)

        result = Block(name='Result')
        result.add_input()
        def result_func(self):
            in_1 = self.get_interface(name='Input 1')
        result.add_compute(result_func)

        load_schema = st.selectbox('Select a saved schema:', barfi_schemas())

        compute_engine = st.checkbox('Activate barfi compute engine', value=False)

        barfi_result = st_barfi(base_blocks=[feed, result, mixer, splitter],
                                compute_engine=compute_engine, load_schema=load_schema)

        if barfi_result:
            st.write(barfi_result)
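To trace the demo graph above (the numbers are an editorial walkthrough, not part of the diff): Feed's compute sets Output 1 to 4; wired into Splitter, each of its two outputs carries 4/2 = 2; feeding both into Mixer sums them back to 2 + 2 = 4, which Result receives once the compute engine checkbox is enabled. The wiring itself is done interactively on the barfi canvas.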
@ -14,10 +14,14 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# base webui import and utils.
from sd_utils import *
from sd_utils import st, server_state, \
     generation_callback, process_images, KDiffusionSampler, \
     custom_models_available, RealESRGAN_available, GFPGAN_available, \
     LDSR_available, load_models, hc, seed_to_int, logger, \
     resize_image, get_matched_noise, CFGMaskedDenoiser, ImageFilter

# streamlit imports
from streamlit import StopException
from streamlit.runtime.scriptrunner import StopException

#other imports
import cv2
@ -425,10 +429,10 @@ def layout():


    mask_expander = st.empty()
    with mask_expander.expander("Mask"):
        mask_mode_list = ["Mask", "Inverted mask", "Image alpha"]
        mask_mode = st.selectbox("Mask Mode", mask_mode_list, index=st.session_state["defaults"].img2img.mask_mode,
                                 help="Select how you want your image to be masked. \"Mask\" modifies the image where the mask is white.\n\
    with mask_expander.expander("Inpainting/Outpainting"):
        mask_mode_list = ["Outpainting", "Inpainting", "Image alpha"]
        mask_mode = st.selectbox("Painting Mode", mask_mode_list, index=st.session_state["defaults"].img2img.mask_mode,
                                 help="Select how you want your image to be masked/painted. \"Inpainting\" modifies the image where the mask is white.\n\
                                 \"Inverted mask\" modifies the image where the mask is black. \"Image alpha\" modifies the image where the image is transparent."
                                 )
        mask_mode = mask_mode_list.index(mask_mode)
@ -545,7 +549,9 @@ def layout():
                upscaling_method_list.append("LDSR")

            st.session_state["upscaling_method"] = st.selectbox("Upscaling Method", upscaling_method_list,
                                                                index=upscaling_method_list.index(st.session_state['defaults'].general.upscaling_method))
                                                                index=upscaling_method_list.index(st.session_state['defaults'].general.upscaling_method)
                                                                if st.session_state['defaults'].general.upscaling_method in upscaling_method_list
                                                                else 0)

            if st.session_state["RealESRGAN_available"]:
                with st.expander("RealESRGAN"):
@ -34,7 +34,7 @@ And if you're looking for more Ai art tools check out my [Ai generative art tool
# ---------------------------------------------------------------------------------------------------------------------------------------------------

# base webui import and utils.
from sd_utils import *
from sd_utils import st, logger, server_state, server_state_lock, random

# streamlit imports

@ -367,10 +367,7 @@ def layout():
    col1, col2 = st.columns([1, 4], gap="large")

    with col1:
        #url = st.text_area("Input Text","")
        #url = st.text_input("Input Text","", placeholder="A corgi wearing a top hat as an oil painting.")
        #st.subheader("Input Image")
        st.session_state["uploaded_image"] = st.file_uploader('Input Image', type=['png', 'jpg', 'jpeg', 'jfif'], accept_multiple_files=True)
        st.session_state["uploaded_image"] = st.file_uploader('Input Image', type=['png', 'jpg', 'jpeg', 'jfif', 'webp'], accept_multiple_files=True)

        with st.expander("CLIP models", expanded=True):
            st.session_state["ViT-L/14"] = st.checkbox("ViT-L/14", value=True, help="ViT-L/14 model.")
@ -1,551 +0,0 @@
import os
import re
import sys
import k_diffusion as K
import tqdm
from contextlib import contextmanager, nullcontext
import skimage
import numpy as np
import PIL
import torch
from einops import rearrange
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.kdiffusion import CFGMaskedDenoiser, KDiffusionSampler
from ldm.models.diffusion.plms import PLMSSampler
from nataili.util.cache import torch_gc
from nataili.util.check_prompt_length import check_prompt_length
from nataili.util.get_next_sequence_number import get_next_sequence_number
from nataili.util.image_grid import image_grid
from nataili.util.load_learned_embed_in_clip import load_learned_embed_in_clip
from nataili.util.save_sample import save_sample
from nataili.util.seed_to_int import seed_to_int
from slugify import slugify


class img2img:
    def __init__(self, model, device, output_dir, save_extension='jpg',
                 output_file_path=False, load_concepts=False, concepts_dir=None,
                 verify_input=True, auto_cast=True):
        self.model = model
        self.output_dir = output_dir
        self.output_file_path = output_file_path
        self.save_extension = save_extension
        self.load_concepts = load_concepts
        self.concepts_dir = concepts_dir
        self.verify_input = verify_input
        self.auto_cast = auto_cast
        self.device = device
        self.comments = []
        self.output_images = []
        self.info = ''
        self.stats = ''
        self.images = []

    def create_random_tensors(self, shape, seeds):
        xs = []
        for seed in seeds:
            torch.manual_seed(seed)

            # randn results depend on device; gpu and cpu get different results for same seed;
            # the way I see it, it's better to do this on CPU, so that everyone gets same result;
            # but the original script had it like this so i do not dare change it for now because
            # it will break everyone's seeds.
            xs.append(torch.randn(shape, device=self.device))
        x = torch.stack(xs)
        return x

    def process_prompt_tokens(self, prompt_tokens):
        # compviz codebase
        tokenizer = self.model.cond_stage_model.tokenizer
        text_encoder = self.model.cond_stage_model.transformer

        # diffusers codebase
        #tokenizer = pipe.tokenizer
        #text_encoder = pipe.text_encoder

        ext = ('.pt', '.bin')
        for token_name in prompt_tokens:
            embedding_path = os.path.join(self.concepts_dir, token_name)
            if os.path.exists(embedding_path):
                for files in os.listdir(embedding_path):
                    if files.endswith(ext):
                        load_learned_embed_in_clip(f"{os.path.join(embedding_path, files)}", text_encoder, tokenizer, f"<{token_name}>")
            else:
                print(f"Concept {token_name} not found in {self.concepts_dir}")
                del tokenizer, text_encoder
                return
        del tokenizer, text_encoder

    def resize_image(self, resize_mode, im, width, height):
        LANCZOS = (PIL.Image.Resampling.LANCZOS if hasattr(PIL.Image, 'Resampling') else PIL.Image.LANCZOS)
        if resize_mode == "resize":
            res = im.resize((width, height), resample=LANCZOS)
        elif resize_mode == "crop":
            ratio = width / height
            src_ratio = im.width / im.height

            src_w = width if ratio > src_ratio else im.width * height // im.height
            src_h = height if ratio <= src_ratio else im.height * width // im.width

            resized = im.resize((src_w, src_h), resample=LANCZOS)
            res = PIL.Image.new("RGBA", (width, height))
            res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
        else:
            ratio = width / height
            src_ratio = im.width / im.height

            src_w = width if ratio < src_ratio else im.width * height // im.height
            src_h = height if ratio >= src_ratio else im.height * width // im.width

            resized = im.resize((src_w, src_h), resample=LANCZOS)
            res = PIL.Image.new("RGBA", (width, height))
            res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))

            if ratio < src_ratio:
                fill_height = height // 2 - src_h // 2
                res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
                res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
            elif ratio > src_ratio:
                fill_width = width // 2 - src_w // 2
                res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
                res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))

        return res

    #
    # helper fft routines that keep ortho normalization and auto-shift before and after fft
    def _fft2(self, data):
        if data.ndim > 2:  # has channels
            out_fft = np.zeros((data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128)
            for c in range(data.shape[2]):
                c_data = data[:,:,c]
                out_fft[:,:,c] = np.fft.fft2(np.fft.fftshift(c_data), norm="ortho")
                out_fft[:,:,c] = np.fft.ifftshift(out_fft[:,:,c])
        else:  # one channel
            out_fft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128)
            out_fft[:,:] = np.fft.fft2(np.fft.fftshift(data), norm="ortho")
            out_fft[:,:] = np.fft.ifftshift(out_fft[:,:])

        return out_fft

    def _ifft2(self, data):
        if data.ndim > 2:  # has channels
            out_ifft = np.zeros((data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128)
            for c in range(data.shape[2]):
                c_data = data[:,:,c]
                out_ifft[:,:,c] = np.fft.ifft2(np.fft.fftshift(c_data), norm="ortho")
                out_ifft[:,:,c] = np.fft.ifftshift(out_ifft[:,:,c])
        else:  # one channel
            out_ifft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128)
            out_ifft[:,:] = np.fft.ifft2(np.fft.fftshift(data), norm="ortho")
            out_ifft[:,:] = np.fft.ifftshift(out_ifft[:,:])

        return out_ifft

    def _get_gaussian_window(self, width, height, std=3.14, mode=0):

        window_scale_x = float(width / min(width, height))
        window_scale_y = float(height / min(width, height))

        window = np.zeros((width, height))
        x = (np.arange(width) / width * 2. - 1.) * window_scale_x
        for y in range(height):
            fy = (y / height * 2. - 1.) * window_scale_y
            if mode == 0:
                window[:, y] = np.exp(-(x**2+fy**2) * std)
            else:
                window[:, y] = (1/((x**2+1.) * (fy**2+1.))) ** (std/3.14)  # hey wait a minute that's not gaussian

        return window

    def _get_masked_window_rgb(self, np_mask_grey, hardness=1.):
        np_mask_rgb = np.zeros((np_mask_grey.shape[0], np_mask_grey.shape[1], 3))
        if hardness != 1.:
            hardened = np_mask_grey[:] ** hardness
        else:
            hardened = np_mask_grey[:]
        for c in range(3):
            np_mask_rgb[:,:,c] = hardened[:]
        return np_mask_rgb

    def get_matched_noise(self, _np_src_image, np_mask_rgb, noise_q, color_variation):
        """
        Explanation:
        Getting good results in/out-painting with stable diffusion can be challenging.
        Although there are simpler effective solutions for in-painting, out-painting can be especially challenging because there is no color data
        in the masked area to help prompt the generator. Ideally, even for in-painting we'd like to work effectively without that data as well.
        Provided here is my take on a potential solution to this problem.

        By taking a fourier transform of the masked src img we get a function that tells us the presence and orientation of each feature scale in the unmasked src.
        Shaping the init/seed noise for in/outpainting to the same distribution of feature scales, orientations, and positions increases output coherence
        by helping keep features aligned. This technique is applicable to any continuous generation task such as audio or video, each of which can
        be conceptualized as a series of out-painting steps where the last half of the input "frame" is erased. For multi-channel data such as color
        or stereo sound the "color tone" or histogram of the seed noise can be matched to improve quality (using scikit-image currently).
        This method is quite robust and has the added benefit of being fast independently of the size of the out-painted area.
        The effects of this method include things like helping the generator integrate the pre-existing view distance and camera angle.

        Carefully managing color and brightness with histogram matching is also essential to achieving good coherence.

        noise_q controls the exponent in the fall-off of the distribution; it can be any positive number, and lower values mean higher detail (range > 0, default 1.)
        color_variation controls how much freedom is allowed for the colors/palette of the out-painted area (range 0..1, default 0.01)
        This code is provided as is under the Unlicense (https://unlicense.org/)
        Although you have no obligation to do so, if you found this code helpful please find it in your heart to credit me [parlance-zz].

        Questions or comments can be sent to parlance@fifth-harmonic.com (https://github.com/parlance-zz/)
        This code is part of a new branch of a discord bot I am working on integrating with diffusers (https://github.com/parlance-zz/g-diffuser-bot)
        """

        global DEBUG_MODE
        global TMP_ROOT_PATH

        width = _np_src_image.shape[0]
        height = _np_src_image.shape[1]
        num_channels = _np_src_image.shape[2]

        np_src_image = _np_src_image[:] * (1. - np_mask_rgb)
        np_mask_grey = (np.sum(np_mask_rgb, axis=2)/3.)
        np_src_grey = (np.sum(np_src_image, axis=2)/3.)
        all_mask = np.ones((width, height), dtype=bool)
        img_mask = np_mask_grey > 1e-6
        ref_mask = np_mask_grey < 1e-3

        windowed_image = _np_src_image * (1.-self._get_masked_window_rgb(np_mask_grey))
        windowed_image /= np.max(windowed_image)
        windowed_image += np.average(_np_src_image) * np_mask_rgb  # / (1.-np.average(np_mask_rgb)) # rather than leave the masked area black, we get better results from fft by filling the average unmasked color
        #windowed_image += np.average(_np_src_image) * (np_mask_rgb * (1.- np_mask_rgb)) / (1.-np.average(np_mask_rgb)) # compensate for darkening across the mask transition area
        #_save_debug_img(windowed_image, "windowed_src_img")

        src_fft = self._fft2(windowed_image)  # get feature statistics from masked src img
        src_dist = np.absolute(src_fft)
        src_phase = src_fft / src_dist
        #_save_debug_img(src_dist, "windowed_src_dist")

        noise_window = self._get_gaussian_window(width, height, mode=1)  # start with simple gaussian noise
        noise_rgb = np.random.random_sample((width, height, num_channels))
        noise_grey = (np.sum(noise_rgb, axis=2)/3.)
        noise_rgb *= color_variation  # the colorfulness of the starting noise is blended to greyscale with a parameter
        for c in range(num_channels):
            noise_rgb[:,:,c] += (1. - color_variation) * noise_grey

        noise_fft = self._fft2(noise_rgb)
        for c in range(num_channels):
            noise_fft[:,:,c] *= noise_window
        noise_rgb = np.real(self._ifft2(noise_fft))
        shaped_noise_fft = self._fft2(noise_rgb)
        shaped_noise_fft[:,:,:] = np.absolute(shaped_noise_fft[:,:,:])**2 * (src_dist ** noise_q) * src_phase  # perform the actual shaping

        brightness_variation = 0.  # color_variation # todo: temporarily tieing brightness variation to color variation for now
        contrast_adjusted_np_src = _np_src_image[:] * (brightness_variation + 1.) - brightness_variation * 2.

        # scikit-image is used for histogram matching, very convenient!
        shaped_noise = np.real(self._ifft2(shaped_noise_fft))
        shaped_noise -= np.min(shaped_noise)
        shaped_noise /= np.max(shaped_noise)
        shaped_noise[img_mask,:] = skimage.exposure.match_histograms(shaped_noise[img_mask,:]**1., contrast_adjusted_np_src[ref_mask,:], channel_axis=1)
        shaped_noise = _np_src_image[:] * (1. - np_mask_rgb) + shaped_noise * np_mask_rgb
        #_save_debug_img(shaped_noise, "shaped_noise")

        matched_noise = np.zeros((width, height, num_channels))
        matched_noise = shaped_noise[:]
        #matched_noise[all_mask,:] = skimage.exposure.match_histograms(shaped_noise[all_mask,:], _np_src_image[ref_mask,:], channel_axis=1)
        #matched_noise = _np_src_image[:] * (1. - np_mask_rgb) + matched_noise * np_mask_rgb

        #_save_debug_img(matched_noise, "matched_noise")

        """
        todo:
        color_variation doesn't have to be a single number; the overall color tone of the out-painted area could be param controlled
        """

        return np.clip(matched_noise, 0., 1.)

    def find_noise_for_image(self, model, device, init_image, prompt, steps=200, cond_scale=2.0, verbose=False, normalize=False, generation_callback=None):
        image = np.array(init_image).astype(np.float32) / 255.0
        image = image[None].transpose(0, 3, 1, 2)
        image = torch.from_numpy(image)
        image = 2. * image - 1.
        image = image.to(device)
        x = model.get_first_stage_encoding(model.encode_first_stage(image))

        uncond = model.get_learned_conditioning([''])
        cond = model.get_learned_conditioning([prompt])

        s_in = x.new_ones([x.shape[0]])
        dnw = K.external.CompVisDenoiser(model)
        sigmas = dnw.get_sigmas(steps).flip(0)

        if verbose:
            print(sigmas)

        for i in tqdm.trange(1, len(sigmas)):
            x_in = torch.cat([x] * 2)
            sigma_in = torch.cat([sigmas[i - 1] * s_in] * 2)
            cond_in = torch.cat([uncond, cond])

            c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]

            if i == 1:
                t = dnw.sigma_to_t(torch.cat([sigmas[i] * s_in] * 2))
            else:
                t = dnw.sigma_to_t(sigma_in)

            eps = model.apply_model(x_in * c_in, t, cond=cond_in)
            denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)

            denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cond_scale

            if i == 1:
                d = (x - denoised) / (2 * sigmas[i])
            else:
                d = (x - denoised) / sigmas[i - 1]

            dt = sigmas[i] - sigmas[i - 1]
            x = x + d * dt

        return x / sigmas[-1]

    def generate(self, prompt: str, init_img=None, init_mask=None, mask_mode='mask', resize_mode='resize', noise_mode='seed',
                 denoising_strength:float=0.8, ddim_steps=50, sampler_name='k_lms', n_iter=1, batch_size=1, cfg_scale=7.5, seed=None,
                 height=512, width=512, save_individual_images: bool = True, save_grid: bool = True, ddim_eta:float = 0.0):
        seed = seed_to_int(seed)
        image_dict = {
            "seed": seed
        }
        # Init image is assumed to be a PIL image
        init_img = self.resize_image('resize', init_img, width, height)
        if sampler_name == 'PLMS':
            sampler = PLMSSampler(self.model)
        elif sampler_name == 'DDIM':
            sampler = DDIMSampler(self.model)
        elif sampler_name == 'k_dpm_2_a':
            sampler = KDiffusionSampler(self.model, 'dpm_2_ancestral')
        elif sampler_name == 'k_dpm_2':
            sampler = KDiffusionSampler(self.model, 'dpm_2')
        elif sampler_name == 'k_euler_a':
            sampler = KDiffusionSampler(self.model, 'euler_ancestral')
        elif sampler_name == 'k_euler':
            sampler = KDiffusionSampler(self.model, 'euler')
        elif sampler_name == 'k_heun':
            sampler = KDiffusionSampler(self.model, 'heun')
        elif sampler_name == 'k_lms':
            sampler = KDiffusionSampler(self.model, 'lms')
        else:
            raise Exception("Unknown sampler: " + sampler_name)

        torch_gc()

        def process_init_mask(init_mask: PIL.Image):
            if init_mask.mode == "RGBA":
                init_mask = init_mask.convert('RGBA')
                background = PIL.Image.new('RGBA', init_mask.size, (0, 0, 0))
                init_mask = PIL.Image.alpha_composite(background, init_mask)
                init_mask = init_mask.convert('RGB')
            return init_mask

        if mask_mode == "mask":
            if init_mask:
                init_mask = process_init_mask(init_mask)
        elif mask_mode == "invert":
            if init_mask:
                init_mask = process_init_mask(init_mask)
                init_mask = PIL.ImageOps.invert(init_mask)
        elif mask_mode == "alpha":
            init_img_transparency = init_img.split()[-1].convert('L')  # .point(lambda x: 255 if x > 0 else 0, mode='1')
            init_mask = init_img_transparency
            init_mask = init_mask.convert("RGB")
            init_mask = self.resize_image(resize_mode, init_mask, width, height)
            init_mask = init_mask.convert("RGB")

        assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'
        t_enc = int(denoising_strength * ddim_steps)

        if init_mask is not None and (noise_mode == "matched" or noise_mode == "find_and_matched") and init_img is not None:
            noise_q = 0.99
            color_variation = 0.0
            mask_blend_factor = 1.0

            np_init = (np.asarray(init_img.convert("RGB"))/255.0).astype(np.float64)  # annoyingly complex mask fixing
            np_mask_rgb = 1. - (np.asarray(PIL.ImageOps.invert(init_mask).convert("RGB"))/255.0).astype(np.float64)
            np_mask_rgb -= np.min(np_mask_rgb)
            np_mask_rgb /= np.max(np_mask_rgb)
            np_mask_rgb = 1. - np_mask_rgb
            np_mask_rgb_hardened = 1. - (np_mask_rgb < 0.99).astype(np.float64)
            blurred = skimage.filters.gaussian(np_mask_rgb_hardened[:], sigma=16., channel_axis=2, truncate=32.)
            blurred2 = skimage.filters.gaussian(np_mask_rgb_hardened[:], sigma=16., channel_axis=2, truncate=32.)
            #np_mask_rgb_dilated = np_mask_rgb + blurred # fixup mask todo: derive magic constants
            #np_mask_rgb = np_mask_rgb + blurred
            np_mask_rgb_dilated = np.clip((np_mask_rgb + blurred2) * 0.7071, 0., 1.)
            np_mask_rgb = np.clip((np_mask_rgb + blurred) * 0.7071, 0., 1.)

            noise_rgb = self.get_matched_noise(np_init, np_mask_rgb, noise_q, color_variation)
            blend_mask_rgb = np.clip(np_mask_rgb_dilated, 0., 1.) ** (mask_blend_factor)
            noised = noise_rgb[:]
            blend_mask_rgb **= (2.)
            noised = np_init[:] * (1. - blend_mask_rgb) + noised * blend_mask_rgb

            np_mask_grey = np.sum(np_mask_rgb, axis=2)/3.
            ref_mask = np_mask_grey < 1e-3

            all_mask = np.ones((height, width), dtype=bool)
            noised[all_mask,:] = skimage.exposure.match_histograms(noised[all_mask,:]**1., noised[ref_mask,:], channel_axis=1)

            init_img = PIL.Image.fromarray(np.clip(noised * 255., 0., 255.).astype(np.uint8), mode="RGB")

        def init():
            image = init_img.convert('RGB')
            image = np.array(image).astype(np.float32) / 255.0
            image = image[None].transpose(0, 3, 1, 2)
            image = torch.from_numpy(image)

            mask_channel = None
            if init_mask:
                alpha = self.resize_image(resize_mode, init_mask, width // 8, height // 8)
                mask_channel = alpha.split()[-1]

            mask = None
            if mask_channel is not None:
                mask = np.array(mask_channel).astype(np.float32) / 255.0
                mask = (1 - mask)
                mask = np.tile(mask, (4, 1, 1))
                mask = mask[None].transpose(0, 1, 2, 3)
                mask = torch.from_numpy(mask).to(self.model.device)

            init_image = 2. * image - 1.
            init_image = init_image.to(self.model.device)
            init_latent = self.model.get_first_stage_encoding(self.model.encode_first_stage(init_image))  # move to latent space

            return init_latent, mask,

        def sample(init_data, x, conditioning, unconditional_conditioning, sampler_name):
            t_enc_steps = t_enc
            obliterate = False
            if ddim_steps == t_enc_steps:
                t_enc_steps = t_enc_steps - 1
                obliterate = True

            if sampler_name != 'DDIM':
                x0, z_mask = init_data

                sigmas = sampler.model_wrap.get_sigmas(ddim_steps)
                noise = x * sigmas[ddim_steps - t_enc_steps - 1]

                xi = x0 + noise

                # Obliterate masked image
                if z_mask is not None and obliterate:
                    random = torch.randn(z_mask.shape, device=xi.device)
                    xi = (z_mask * noise) + ((1-z_mask) * xi)

                sigma_sched = sigmas[ddim_steps - t_enc_steps - 1:]
                model_wrap_cfg = CFGMaskedDenoiser(sampler.model_wrap)
                samples_ddim = K.sampling.__dict__[f'sample_{sampler.get_sampler_name()}'](model_wrap_cfg, xi, sigma_sched,
                                                                                           extra_args={'cond': conditioning, 'uncond': unconditional_conditioning,
                                                                                                       'cond_scale': cfg_scale, 'mask': z_mask, 'x0': x0, 'xi': xi}, disable=False)
            else:

                x0, z_mask = init_data

                sampler.make_schedule(ddim_num_steps=ddim_steps, ddim_eta=0.0, verbose=False)
                z_enc = sampler.stochastic_encode(x0, torch.tensor([t_enc_steps]*batch_size).to(self.model.device))

                # Obliterate masked image
                if z_mask is not None and obliterate:
                    random = torch.randn(z_mask.shape, device=z_enc.device)
                    z_enc = (z_mask * random) + ((1-z_mask) * z_enc)

                # decode it
                samples_ddim = sampler.decode(z_enc, conditioning, t_enc_steps,
                                              unconditional_guidance_scale=cfg_scale,
                                              unconditional_conditioning=unconditional_conditioning,
                                              z_mask=z_mask, x0=x0)
            return samples_ddim

        torch_gc()

        if self.load_concepts and self.concepts_dir is not None:
            prompt_tokens = re.findall('<([a-zA-Z0-9-]+)>', prompt)
            if prompt_tokens:
                self.process_prompt_tokens(prompt_tokens)

        os.makedirs(self.output_dir, exist_ok=True)

        sample_path = os.path.join(self.output_dir, "samples")
        os.makedirs(sample_path, exist_ok=True)

        if self.verify_input:
            try:
                check_prompt_length(self.model, prompt, self.comments)
            except:
                import traceback
                print("Error verifying input:", file=sys.stderr)
                print(traceback.format_exc(), file=sys.stderr)

        all_prompts = batch_size * n_iter * [prompt]
        all_seeds = [seed + x for x in range(len(all_prompts))]

        precision_scope = torch.autocast if self.auto_cast else nullcontext

        with torch.no_grad(), precision_scope("cuda"):
            for n in range(n_iter):
                print(f"Iteration: {n+1}/{n_iter}")
                prompts = all_prompts[n * batch_size:(n + 1) * batch_size]
                seeds = all_seeds[n * batch_size:(n + 1) * batch_size]

                uc = self.model.get_learned_conditioning(len(prompts) * [''])

                if isinstance(prompts, tuple):
                    prompts = list(prompts)

                c = self.model.get_learned_conditioning(prompts)

                opt_C = 4
                opt_f = 8
                shape = [opt_C, height // opt_f, width // opt_f]

                x = self.create_random_tensors(shape, seeds=seeds)
                init_data = init()
                samples_ddim = sample(init_data=init_data, x=x, conditioning=c, unconditional_conditioning=uc, sampler_name=sampler_name)

                x_samples_ddim = self.model.decode_first_stage(samples_ddim)
                x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)

                for i, x_sample in enumerate(x_samples_ddim):
                    sanitized_prompt = slugify(prompts[i])
                    full_path = os.path.join(os.getcwd(), sample_path)
                    sample_path_i = sample_path
                    base_count = get_next_sequence_number(sample_path_i)
                    filename = f"{base_count:05}-{ddim_steps}_{sampler_name}_{seeds[i]}_{sanitized_prompt}"[:200-len(full_path)]

                    x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
                    x_sample = x_sample.astype(np.uint8)
                    image = PIL.Image.fromarray(x_sample)
                    image_dict['image'] = image
                    self.images.append(image_dict)

                    if save_individual_images:
                        path = os.path.join(sample_path, filename + '.' + self.save_extension)
                        success = save_sample(image, filename, sample_path_i, self.save_extension)
                        if success:
                            if self.output_file_path:
                                self.output_images.append(path)
                            else:
                                self.output_images.append(image)
                        else:
                            return

        self.info = f"""
                {prompt}
                Steps: {ddim_steps}, Sampler: {sampler_name}, CFG scale: {cfg_scale}, Seed: {seed}
                """.strip()
        self.stats = f'''
                '''

        for comment in self.comments:
            self.info += "\n\n" + comment

        torch_gc()

        del sampler

        return
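Before this deletion, the class was driven roughly like this (a sketch under assumed imports; the constructor and generate signatures come from the code above, while model/device/some_pil_image are hypothetical placeholders):

    # hypothetical usage of the removed nataili img2img class
    gen = img2img(model, device, output_dir="outputs", save_extension="jpg")
    gen.generate("a watercolor fox", init_img=some_pil_image, init_mask=None, noise_mode="matched")
    images = gen.images  # list of {"seed": ..., "image": PIL.Image} dicts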
@ -1,201 +0,0 @@
import os
import re
import sys
from contextlib import contextmanager, nullcontext

import numpy as np
import PIL
import torch
from einops import rearrange
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.kdiffusion import KDiffusionSampler
from ldm.models.diffusion.plms import PLMSSampler
from nataili.util.cache import torch_gc
from nataili.util.check_prompt_length import check_prompt_length
from nataili.util.get_next_sequence_number import get_next_sequence_number
from nataili.util.image_grid import image_grid
from nataili.util.load_learned_embed_in_clip import load_learned_embed_in_clip
from nataili.util.save_sample import save_sample
from nataili.util.seed_to_int import seed_to_int
from slugify import slugify


class txt2img:
    def __init__(self, model, device, output_dir, save_extension='jpg',
                 output_file_path=False, load_concepts=False, concepts_dir=None,
                 verify_input=True, auto_cast=True):
        self.model = model
        self.output_dir = output_dir
        self.output_file_path = output_file_path
        self.save_extension = save_extension
        self.load_concepts = load_concepts
        self.concepts_dir = concepts_dir
        self.verify_input = verify_input
        self.auto_cast = auto_cast
        self.device = device
        self.comments = []
        self.output_images = []
        self.info = ''
        self.stats = ''
        self.images = []

    def create_random_tensors(self, shape, seeds):
        xs = []
        for seed in seeds:
            torch.manual_seed(seed)

            # randn results depend on device; gpu and cpu get different results for same seed;
            # the way I see it, it's better to do this on CPU, so that everyone gets same result;
            # but the original script had it like this so i do not dare change it for now because
            # it will break everyone's seeds.
            xs.append(torch.randn(shape, device=self.device))
        x = torch.stack(xs)
        return x

    def process_prompt_tokens(self, prompt_tokens):
        # compviz codebase
        tokenizer = self.model.cond_stage_model.tokenizer
        text_encoder = self.model.cond_stage_model.transformer

        # diffusers codebase
        #tokenizer = pipe.tokenizer
        #text_encoder = pipe.text_encoder

        ext = ('.pt', '.bin')
        for token_name in prompt_tokens:
            embedding_path = os.path.join(self.concepts_dir, token_name)
            if os.path.exists(embedding_path):
                for files in os.listdir(embedding_path):
                    if files.endswith(ext):
                        load_learned_embed_in_clip(f"{os.path.join(embedding_path, files)}", text_encoder, tokenizer, f"<{token_name}>")
            else:
                print(f"Concept {token_name} not found in {self.concepts_dir}")
                del tokenizer, text_encoder
                return
        del tokenizer, text_encoder

    def generate(self, prompt: str, ddim_steps=50, sampler_name='k_lms', n_iter=1, batch_size=1, cfg_scale=7.5, seed=None,
                 height=512, width=512, save_individual_images: bool = True, save_grid: bool = True, ddim_eta:float = 0.0):
        seed = seed_to_int(seed)

        image_dict = {
            "seed": seed
        }
        negprompt = ''
        if '###' in prompt:
            prompt, negprompt = prompt.split('###', 1)
            prompt = prompt.strip()
            negprompt = negprompt.strip()

        if sampler_name == 'PLMS':
            sampler = PLMSSampler(self.model)
        elif sampler_name == 'DDIM':
            sampler = DDIMSampler(self.model)
        elif sampler_name == 'k_dpm_2_a':
            sampler = KDiffusionSampler(self.model, 'dpm_2_ancestral')
        elif sampler_name == 'k_dpm_2':
            sampler = KDiffusionSampler(self.model, 'dpm_2')
        elif sampler_name == 'k_euler_a':
            sampler = KDiffusionSampler(self.model, 'euler_ancestral')
        elif sampler_name == 'k_euler':
            sampler = KDiffusionSampler(self.model, 'euler')
        elif sampler_name == 'k_heun':
            sampler = KDiffusionSampler(self.model, 'heun')
        elif sampler_name == 'k_lms':
            sampler = KDiffusionSampler(self.model, 'lms')
        else:
            raise Exception("Unknown sampler: " + sampler_name)

        def sample(init_data, x, conditioning, unconditional_conditioning, sampler_name):
            samples_ddim, _ = sampler.sample(S=ddim_steps, conditioning=conditioning, unconditional_guidance_scale=cfg_scale,
                                             unconditional_conditioning=unconditional_conditioning, x_T=x)
            return samples_ddim

        torch_gc()

        if self.load_concepts and self.concepts_dir is not None:
            prompt_tokens = re.findall('<([a-zA-Z0-9-]+)>', prompt)
            if prompt_tokens:
                self.process_prompt_tokens(prompt_tokens)

        os.makedirs(self.output_dir, exist_ok=True)

        sample_path = os.path.join(self.output_dir, "samples")
        os.makedirs(sample_path, exist_ok=True)

        if self.verify_input:
            try:
                check_prompt_length(self.model, prompt, self.comments)
            except:
                import traceback
                print("Error verifying input:", file=sys.stderr)
                print(traceback.format_exc(), file=sys.stderr)

        all_prompts = batch_size * n_iter * [prompt]
        all_seeds = [seed + x for x in range(len(all_prompts))]

        precision_scope = torch.autocast if self.auto_cast else nullcontext

        with torch.no_grad(), precision_scope("cuda"):
            for n in range(n_iter):
                print(f"Iteration: {n+1}/{n_iter}")
                prompts = all_prompts[n * batch_size:(n + 1) * batch_size]
                seeds = all_seeds[n * batch_size:(n + 1) * batch_size]

                uc = self.model.get_learned_conditioning(len(prompts) * [negprompt])

                if isinstance(prompts, tuple):
                    prompts = list(prompts)

                c = self.model.get_learned_conditioning(prompts)

                opt_C = 4
                opt_f = 8
                shape = [opt_C, height // opt_f, width // opt_f]

                x = self.create_random_tensors(shape, seeds=seeds)

                samples_ddim = sample(init_data=None, x=x, conditioning=c, unconditional_conditioning=uc, sampler_name=sampler_name)

                x_samples_ddim = self.model.decode_first_stage(samples_ddim)
                x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)

                for i, x_sample in enumerate(x_samples_ddim):
                    sanitized_prompt = slugify(prompts[i])
                    full_path = os.path.join(os.getcwd(), sample_path)
                    sample_path_i = sample_path
                    base_count = get_next_sequence_number(sample_path_i)
                    filename = f"{base_count:05}-{ddim_steps}_{sampler_name}_{seeds[i]}_{sanitized_prompt}"[:200-len(full_path)]

                    x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
                    x_sample = x_sample.astype(np.uint8)
                    image = PIL.Image.fromarray(x_sample)
                    image_dict['image'] = image
                    self.images.append(image_dict)

                    if save_individual_images:
                        path = os.path.join(sample_path, filename + '.' + self.save_extension)
                        success = save_sample(image, filename, sample_path_i, self.save_extension)
                        if success:
                            if self.output_file_path:
                                self.output_images.append(path)
                            else:
                                self.output_images.append(image)
                        else:
                            return

        self.info = f"""
                {prompt}
                Steps: {ddim_steps}, Sampler: {sampler_name}, CFG scale: {cfg_scale}, Seed: {seed}
                """.strip()
        self.stats = f'''
                '''

        for comment in self.comments:
|
||||
self.info += "\n\n" + comment
|
||||
|
||||
torch_gc()
|
||||
|
||||
del sampler
|
||||
|
||||
return
|
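A minimal, hypothetical driving sketch for the class above (the loader calls mirror the ModelManager below; the model name and output path are assumptions, not part of this diff):

    # hypothetical usage -- 'stable_diffusion' must exist in db.json and be downloaded
    mm = ModelManager()
    mm.init()
    mm.load_model('stable_diffusion')
    loaded = mm.get_loaded_model('stable_diffusion')

    generator = txt2img(loaded['model'], loaded['device'], output_dir='outputs')
    generator.generate('a watercolor fox ### blurry, low quality',  # text after ### is the negative prompt
                       ddim_steps=30, sampler_name='k_euler_a', seed=42)
    print(generator.info)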
@ -1,458 +0,0 @@
import os
import json
import shutil
import zipfile
import requests
import git
import torch
import hashlib
from ldm.util import instantiate_from_config
from omegaconf import OmegaConf
from transformers import logging

from basicsr.archs.rrdbnet_arch import RRDBNet
from gfpgan import GFPGANer
from realesrgan import RealESRGANer
from ldm.models.blip import blip_decoder
from tqdm import tqdm
import open_clip
import clip

from nataili.util.cache import torch_gc
from nataili.util import logger

logging.set_verbosity_error()

models = json.load(open('./db.json'))
dependencies = json.load(open('./db_dep.json'))
remote_models = "https://raw.githubusercontent.com/Sygil-Dev/nataili-model-reference/main/db.json"
remote_dependencies = "https://raw.githubusercontent.com/Sygil-Dev/nataili-model-reference/main/db_dep.json"


class ModelManager():
    def __init__(self, hf_auth=None, download=True):
        if download:
            try:
                logger.init("Model Reference", status="Downloading")
                r = requests.get(remote_models)
                self.models = r.json()
                r = requests.get(remote_dependencies)
                self.dependencies = r.json()
                logger.init_ok("Model Reference", status="OK")
            except Exception:
                logger.init_err("Model Reference", status="Download Error")
                self.models = json.load(open('./db.json'))
                self.dependencies = json.load(open('./db_dep.json'))
                logger.init_warn("Model Reference", status="Local")
        else:
            # fall back to the local reference files when downloads are disabled
            self.models = json.load(open('./db.json'))
            self.dependencies = json.load(open('./db_dep.json'))
        self.available_models = []
        self.tainted_models = []
        self.available_dependencies = []
        self.loaded_models = {}
        self.hf_auth = None
        self.set_authentication(hf_auth)

    def init(self):
        dependencies_available = []
        for dependency in self.dependencies:
            if self.check_available(self.get_dependency_files(dependency)):
                dependencies_available.append(dependency)
        self.available_dependencies = dependencies_available

        models_available = []
        for model in self.models:
            if self.check_available(self.get_model_files(model)):
                models_available.append(model)
        self.available_models = models_available

        if self.hf_auth is not None:
            if 'username' not in self.hf_auth or 'password' not in self.hf_auth:
                raise ValueError('hf_auth must contain username and password')
            else:
                if self.hf_auth['username'] == '' or self.hf_auth['password'] == '':
                    raise ValueError('hf_auth must contain username and password')
        return True

    def set_authentication(self, hf_auth=None):
        # We do not let no authentication override previously set auth
        if not hf_auth and self.hf_auth:
            return
        self.hf_auth = hf_auth

    def get_model(self, model_name):
        return self.models.get(model_name)

    def get_filtered_models(self, **kwargs):
        '''Get all model names.
        Can filter based on metadata of the model reference db
        '''
        filtered_models = self.models
        for keyword in kwargs:
            iterating_models = filtered_models.copy()
            filtered_models = {}
            for model in iterating_models:
                # logger.debug([keyword, iterating_models[model].get(keyword), kwargs[keyword]])
                if iterating_models[model].get(keyword) == kwargs[keyword]:
                    filtered_models[model] = iterating_models[model]
        return filtered_models

    def get_filtered_model_names(self, **kwargs):
        filtered_models = self.get_filtered_models(**kwargs)
        return list(filtered_models.keys())

    def get_dependency(self, dependency_name):
        return self.dependencies[dependency_name]

    def get_model_files(self, model_name):
        return self.models[model_name]['config']['files']

    def get_dependency_files(self, dependency_name):
        return self.dependencies[dependency_name]['config']['files']

    def get_model_download(self, model_name):
        return self.models[model_name]['config']['download']

    def get_dependency_download(self, dependency_name):
        return self.dependencies[dependency_name]['config']['download']

    def get_available_models(self):
        return self.available_models

    def get_available_dependencies(self):
        return self.available_dependencies

    def get_loaded_models(self):
        return self.loaded_models

    def get_loaded_models_names(self):
        return list(self.loaded_models.keys())

    def get_loaded_model(self, model_name):
        return self.loaded_models[model_name]

    def unload_model(self, model_name):
        if model_name in self.loaded_models:
            del self.loaded_models[model_name]
            return True
        return False

    def unload_all_models(self):
        # clear() instead of deleting entries while iterating, which would raise RuntimeError
        self.loaded_models.clear()
        return True

    def taint_model(self, model_name):
        '''Marks a model as not valid by removing it from available_models'''
        if model_name in self.available_models:
            self.available_models.remove(model_name)
            self.tainted_models.append(model_name)

    def taint_models(self, models):
        for model in models:
            self.taint_model(model)

    def load_model_from_config(self, model_path='', config_path='', map_location="cpu"):
        config = OmegaConf.load(config_path)
        pl_sd = torch.load(model_path, map_location=map_location)
        if "global_step" in pl_sd:
            logger.info(f"Global Step: {pl_sd['global_step']}")
        sd = pl_sd["state_dict"]
        model = instantiate_from_config(config.model)
        m, u = model.load_state_dict(sd, strict=False)
        model = model.eval()
        del pl_sd, sd, m, u
        return model

    def load_ckpt(self, model_name='', precision='half', gpu_id=0):
        ckpt_path = self.get_model_files(model_name)[0]['path']
        config_path = self.get_model_files(model_name)[1]['path']
        model = self.load_model_from_config(model_path=ckpt_path, config_path=config_path)
        device = torch.device(f"cuda:{gpu_id}")
        model = (model if precision == 'full' else model.half()).to(device)
        torch_gc()
        return {'model': model, 'device': device}

    def load_realesrgan(self, model_name='', precision='half', gpu_id=0):
        RealESRGAN_models = {
            'RealESRGAN_x4plus': RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4),
            'RealESRGAN_x4plus_anime_6B': RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
        }

        model_path = self.get_model_files(model_name)[0]['path']
        device = torch.device(f"cuda:{gpu_id}")
        model = RealESRGANer(scale=2, model_path=model_path, model=RealESRGAN_models[models[model_name]['name']],
                             pre_pad=0, half=True if precision == 'half' else False, device=device)
        return {'model': model, 'device': device}

    def load_gfpgan(self, model_name='', gpu_id=0):
        model_path = self.get_model_files(model_name)[0]['path']
        device = torch.device(f"cuda:{gpu_id}")
        model = GFPGANer(model_path=model_path, upscale=1, arch='clean',
                         channel_multiplier=2, bg_upsampler=None, device=device)
        return {'model': model, 'device': device}

    def load_blip(self, model_name='', precision='half', gpu_id=0, blip_image_eval_size=512, vit='base'):
        # vit = 'base' or 'large'
        model_path = self.get_model_files(model_name)[0]['path']
        device = torch.device(f"cuda:{gpu_id}")
        model = blip_decoder(pretrained=model_path,
                             med_config="configs/blip/med_config.json",
                             image_size=blip_image_eval_size, vit=vit)
        model = model.eval()
        model = (model if precision == 'full' else model.half()).to(device)
        return {'model': model, 'device': device}

    def load_open_clip(self, model_name='', precision='half', gpu_id=0):
        pretrained = self.get_model(model_name)['pretrained_name']
        device = torch.device(f"cuda:{gpu_id}")
        model, _, preprocesses = open_clip.create_model_and_transforms(model_name, pretrained=pretrained, cache_dir='models/clip')
        model = model.eval()
        model = (model if precision == 'full' else model.half()).to(device)
        return {'model': model, 'device': device, 'preprocesses': preprocesses}

    def load_clip(self, model_name='', precision='half', gpu_id=0):
        device = torch.device(f"cuda:{gpu_id}")
        model, preprocesses = clip.load(model_name, device=device, download_root='models/clip')
        model = model.eval()
        model = (model if precision == 'full' else model.half()).to(device)
        return {'model': model, 'device': device, 'preprocesses': preprocesses}

    def load_model(self, model_name='', precision='half', gpu_id=0):
        if model_name not in self.available_models:
            return False
        if self.models[model_name]['type'] == 'ckpt':
            self.loaded_models[model_name] = self.load_ckpt(model_name, precision, gpu_id)
            return True
        elif self.models[model_name]['type'] == 'realesrgan':
            self.loaded_models[model_name] = self.load_realesrgan(model_name, precision, gpu_id)
            return True
        elif self.models[model_name]['type'] == 'gfpgan':
            self.loaded_models[model_name] = self.load_gfpgan(model_name, gpu_id)
            return True
        elif self.models[model_name]['type'] == 'blip':
            self.loaded_models[model_name] = self.load_blip(model_name, precision, gpu_id, 512, 'base')
            return True
        elif self.models[model_name]['type'] == 'open_clip':
            self.loaded_models[model_name] = self.load_open_clip(model_name, precision, gpu_id)
            return True
        elif self.models[model_name]['type'] == 'clip':
            self.loaded_models[model_name] = self.load_clip(model_name, precision, gpu_id)
            return True
        else:
            return False

    def validate_model(self, model_name):
        files = self.get_model_files(model_name)
        for file_details in files:
            if not self.check_file_available(file_details['path']):
                return False
            if not self.validate_file(file_details):
                return False
        return True

    def validate_file(self, file_details):
        if 'md5sum' in file_details:
            file_name = file_details['path']
            logger.debug(f"Getting md5sum of {file_name}")
            with open(file_name, 'rb') as file_to_check:
                file_hash = hashlib.md5()
                while chunk := file_to_check.read(8192):
                    file_hash.update(chunk)
            if file_details['md5sum'] != file_hash.hexdigest():
                return False
        return True

    def check_file_available(self, file_path):
        return os.path.exists(file_path)

    def check_available(self, files):
        available = True
        for file in files:
            if not self.check_file_available(file['path']):
                available = False
        return available

    def download_file(self, url, file_path):
        # make directory
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        pbar_desc = file_path.split('/')[-1]
        r = requests.get(url, stream=True)
        with open(file_path, 'wb') as f:
            with tqdm(
                # all optional kwargs
                unit='B', unit_scale=True, unit_divisor=1024, miniters=1,
                desc=pbar_desc, total=int(r.headers.get('content-length', 0))
            ) as pbar:
                for chunk in r.iter_content(chunk_size=16*1024):
                    if chunk:
                        f.write(chunk)
                        pbar.update(len(chunk))

    def download_model(self, model_name):
        if model_name in self.available_models:
            logger.info(f"{model_name} is already available.")
            return True
        download = self.get_model_download(model_name)
        files = self.get_model_files(model_name)
        for i in range(len(download)):
            file_path = f"{download[i]['file_path']}/{download[i]['file_name']}" if 'file_path' in download[i] else files[i]['path']

            if 'file_url' in download[i]:
                download_url = download[i]['file_url']
                if 'hf_auth' in download[i]:
                    username = self.hf_auth['username']
                    password = self.hf_auth['password']
                    download_url = download_url.format(username=username, password=password)
            if 'file_name' in download[i]:
                download_name = download[i]['file_name']
            if 'file_path' in download[i]:
                download_path = download[i]['file_path']

            if 'manual' in download[i]:
                logger.warning(f"The model {model_name} requires manual download from {download_url}. Please place it in {download_path}/{download_name} then press ENTER to continue...")
                input('')
                continue
            # TODO: simplify
            if "file_content" in download[i]:
                file_content = download[i]['file_content']
                logger.info(f"writing {file_content} to {file_path}")
                # make directory download_path
                os.makedirs(download_path, exist_ok=True)
                # write file_content to download_path/download_name
                with open(os.path.join(download_path, download_name), 'w') as f:
                    f.write(file_content)
            elif 'symlink' in download[i]:
                logger.info(f"symlink {file_path} to {download[i]['symlink']}")
                symlink = download[i]['symlink']
                # make directory symlink
                os.makedirs(download_path, exist_ok=True)
                # make symlink from download_path/download_name to symlink
                os.symlink(symlink, os.path.join(download_path, download_name))
            elif 'git' in download[i]:
                logger.info(f"git clone {download_url} to {file_path}")
                # make directory download_path
                os.makedirs(file_path, exist_ok=True)
                git.Git(file_path).clone(download_url)
                if 'post_process' in download[i]:
                    for post_process in download[i]['post_process']:
                        if 'delete' in post_process:
                            # delete folder post_process['delete']
                            logger.info(f"delete {post_process['delete']}")
                            try:
                                shutil.rmtree(post_process['delete'])
                            except PermissionError as e:
                                logger.error(f"[!] Something went wrong while deleting `{post_process['delete']}`. Please delete it manually.")
                                logger.error(f"PermissionError: {e}")
            else:
                if not self.check_file_available(file_path) or model_name in self.tainted_models:
                    logger.debug(f'Downloading {download_url} to {file_path}')
                    self.download_file(download_url, file_path)
        if not self.validate_model(model_name):
            return False
        if model_name in self.tainted_models:
            self.tainted_models.remove(model_name)
        self.init()
        return True

    def download_dependency(self, dependency_name):
        if dependency_name in self.available_dependencies:
            logger.info(f"{dependency_name} is already installed.")
            return True
        download = self.get_dependency_download(dependency_name)
        files = self.get_dependency_files(dependency_name)
        for i in range(len(download)):
            if "git" in download[i]:
                logger.warning("git download not implemented yet")
                break

            file_path = files[i]['path']
            if 'file_url' in download[i]:
                download_url = download[i]['file_url']
            if 'file_name' in download[i]:
                download_name = download[i]['file_name']
            if 'file_path' in download[i]:
                download_path = download[i]['file_path']
            logger.debug(download_name)
            if "unzip" in download[i]:
                zip_path = f'temp/{download_name}.zip'
                # mkdir temp
                os.makedirs("temp", exist_ok=True)

                self.download_file(download_url, zip_path)
                logger.info(f"unzip {zip_path}")
                with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                    zip_ref.extractall('temp/')
                # move temp/<download_name>-main/<download_name> to download_path
                logger.info(f"move temp/{download_name}-main/{download_name} to {download_path}")
                shutil.move(f"temp/{download_name}-main/{download_name}", download_path)
                logger.info(f"delete {zip_path}")
                os.remove(zip_path)
                logger.info(f"delete temp/{download_name}-main/")
                shutil.rmtree(f"temp/{download_name}-main")
            else:
                if not self.check_file_available(file_path):
                    logger.init(f'{file_path}', status="Downloading")
                    self.download_file(download_url, file_path)
        self.init()
        return True

    def download_all_models(self):
        for model in self.get_filtered_model_names(download_all=True):
            if not self.check_model_available(model):
                logger.init(f"{model}", status="Downloading")
                self.download_model(model)
            else:
                logger.info(f"{model} is already downloaded.")
        return True

    def download_all_dependencies(self):
        for dependency in self.dependencies:
            if not self.check_dependency_available(dependency):
                logger.init(f"{dependency}", status="Downloading")
                self.download_dependency(dependency)
            else:
                logger.info(f"{dependency} is already installed.")
        return True

    def download_all(self):
        self.download_all_dependencies()
        self.download_all_models()
        return True

    def check_model_available(self, model_name):
        if model_name not in self.models:
            return False
        return self.check_available(self.get_model_files(model_name))

    def check_dependency_available(self, dependency_name):
        if dependency_name not in self.dependencies:
            return False
        return self.check_available(self.get_dependency_files(dependency_name))

    def check_all_available(self):
        for model in self.models:
            if not self.check_model_available(model):
                return False
        for dependency in self.dependencies:
            if not self.check_dependency_available(dependency):
                return False
        return True
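A hedged usage sketch for the manager (the model name is an assumption and must match an entry in db.json):

    # work from the local db files instead of fetching the remote reference
    mm = ModelManager(download=False)
    mm.init()
    print(mm.get_filtered_model_names(type='ckpt'))
    if not mm.check_model_available('stable_diffusion'):
        mm.download_model('stable_diffusion')  # streams the files listed in db.json
    mm.load_model('stable_diffusion', precision='half', gpu_id=0)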
@ -1,48 +0,0 @@
# Class realesrgan
# Inputs:
#  - model
#  - device
#  - output_dir
#  - output_ext
# Outputs:
#  - output_images
import PIL
from torchvision import transforms
import numpy as np
import os
import cv2

from nataili.util.save_sample import save_sample


class realesrgan:
    def __init__(self, model, device, output_dir, output_ext='jpg'):
        self.model = model
        self.device = device
        self.output_dir = output_dir
        self.output_ext = output_ext
        self.output_images = []

    def generate(self, input_image):
        # load image
        img = cv2.imread(input_image, cv2.IMREAD_UNCHANGED)
        if len(img.shape) == 3 and img.shape[2] == 4:
            img_mode = 'RGBA'
        else:
            img_mode = None
        # upscale
        output, _ = self.model.enhance(img)
        if img_mode == 'RGBA':  # RGBA images should be saved in png format
            self.output_ext = 'png'

        esrgan_sample = output[:, :, ::-1]  # BGR (OpenCV) -> RGB (PIL)
        esrgan_image = PIL.Image.fromarray(esrgan_sample)
        # append model name to output image name
        filename = os.path.basename(input_image)
        filename = os.path.splitext(filename)[0]
        filename = f'{filename}_esrgan'
        filename_with_ext = f'{filename}.{self.output_ext}'
        output_image = os.path.join(self.output_dir, filename_with_ext)
        save_sample(esrgan_image, filename, self.output_dir, self.output_ext)
        self.output_images.append(output_image)
        return
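A sketch of how this wrapper might be driven, assuming the model dict comes from ModelManager.load_realesrgan() (the input path is illustrative):

    loaded = mm.load_realesrgan('RealESRGAN_x4plus')
    upscaler = realesrgan(loaded['model'], loaded['device'], output_dir='outputs/upscaled')
    upscaler.generate('outputs/samples/00001-30_k_lms_42_a-watercolor-fox.jpg')
    print(upscaler.output_images)  # paths of the upscaled files that were written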
@ -1 +0,0 @@
from nataili.util.logger import logger, set_logger_verbosity, quiesce_logger, test_logger
@ -1,16 +0,0 @@
import gc

import torch


# Applied as a decorator so gradients are disabled whenever torch_gc() runs,
# which was the apparent intent of the original `with torch.no_grad():` wrapper.
@torch.no_grad()
def torch_gc():
    # run twice so cyclic references are collected before the CUDA cleanup
    for _ in range(2):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
        torch.cuda.synchronize()
        torch.cuda.reset_peak_memory_stats()
        torch.cuda.reset_accumulated_memory_stats()
@ -1,18 +0,0 @@
def check_prompt_length(model, prompt, comments):
    """this function tests if prompt is too long, and if so, adds a message to comments"""

    tokenizer = model.cond_stage_model.tokenizer
    max_length = model.cond_stage_model.max_length

    info = model.cond_stage_model.tokenizer([prompt], truncation=True, max_length=max_length,
                                            return_overflowing_tokens=True, padding="max_length", return_tensors="pt")
    ovf = info['overflowing_tokens'][0]
    overflowing_count = ovf.shape[0]
    if overflowing_count == 0:
        return

    vocab = {v: k for k, v in tokenizer.get_vocab().items()}
    overflowing_words = [vocab.get(int(x), "") for x in ovf]
    overflowing_text = tokenizer.convert_tokens_to_string(''.join(overflowing_words))
    comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")
    del tokenizer
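Illustrative call, assuming `model` is a loaded CompVis checkpoint: CLIP-style tokenizers cap out at 77 tokens, so an overlong prompt yields a warning comment rather than a hard error.

    comments = []
    check_prompt_length(model, 'a ' * 200 + 'fox', comments)  # far past the token limit
    for c in comments:
        print(c)  # "Warning: too many input tokens; some (...) have been truncated: ..."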
@ -1,22 +0,0 @@
from pathlib import Path


def get_next_sequence_number(path, prefix=''):
    """
    Determines and returns the next sequence number to use when saving an
    image in the specified directory.

    If a prefix is given, only consider files whose names start with that
    prefix, and strip the prefix from filenames before extracting their
    sequence number.

    The sequence starts at 0.
    """
    result = -1
    for p in Path(path).iterdir():
        if p.name.endswith(('.png', '.jpg')) and p.name.startswith(prefix):
            tmp = p.name[len(prefix):]
            try:
                result = max(int(tmp.split('-')[0]), result)
            except ValueError:
                pass
    return result + 1
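For example, if outputs/samples already holds 00000-...jpg and 00001-...jpg, the next call returns 2 (an empty directory yields 0):

    next_n = get_next_sequence_number('outputs/samples')
    filename = f"{next_n:05}-30_k_lms_42_my-prompt"  # matches the naming scheme used in txt2img.generate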
@ -1,21 +0,0 @@
import math

import PIL


def image_grid(imgs, n_rows=None):
    if n_rows is not None:
        rows = n_rows
    else:
        rows = math.sqrt(len(imgs))
        rows = round(rows)

    cols = math.ceil(len(imgs) / rows)

    w, h = imgs[0].size
    grid = PIL.Image.new('RGB', size=(cols * w, rows * h), color='black')

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))

    return grid
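A small sketch, assuming imgs is a list of equally sized PIL images (e.g. the outputs of one txt2img batch):

    grid = image_grid(imgs)  # rows inferred from round(sqrt(len(imgs)))
    grid.save('outputs/samples/grid.jpg', quality=95)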
@ -1,40 +0,0 @@
import os

import torch


def load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer, token=None):
    loaded_learned_embeds = torch.load(learned_embeds_path, map_location="cpu")
    # separate token and the embeds
    if learned_embeds_path.endswith('.pt'):
        # old format
        # token = * so replace with file directory name when converting
        trained_token = os.path.basename(learned_embeds_path)
        params_dict = {
            trained_token: torch.tensor(list(loaded_learned_embeds['string_to_param'].items())[0][1])
        }
        learned_embeds_path = os.path.splitext(learned_embeds_path)[0] + '.bin'
        torch.save(params_dict, learned_embeds_path)
        loaded_learned_embeds = torch.load(learned_embeds_path, map_location="cpu")
        trained_token = list(loaded_learned_embeds.keys())[0]
        embeds = loaded_learned_embeds[trained_token]
    elif learned_embeds_path.endswith('.bin'):
        trained_token = list(loaded_learned_embeds.keys())[0]
        embeds = loaded_learned_embeds[trained_token]

    # cast to dtype of text_encoder (Tensor.to is not in-place, so reassign)
    dtype = text_encoder.get_input_embeddings().weight.dtype
    embeds = embeds.to(dtype)

    # add the token in tokenizer
    token = token if token is not None else trained_token
    num_added_tokens = tokenizer.add_tokens(token)

    # resize the token embeddings
    text_encoder.resize_token_embeddings(len(tokenizer))

    # get the id for the token and assign the embeds
    token_id = tokenizer.convert_tokens_to_ids(token)
    text_encoder.get_input_embeddings().weight.data[token_id] = embeds
    return token
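A sketch of registering a downloaded concept so prompts can reference it; the embedding path is an assumption, and tokenizer/text_encoder come from the loaded CompVis model as in txt2img.process_prompt_tokens():

    tokenizer = model.cond_stage_model.tokenizer
    text_encoder = model.cond_stage_model.transformer
    token = load_learned_embed_in_clip('models/custom/sd-concepts-library/my-concept/learned_embeds.bin',
                                       text_encoder, tokenizer, token='<my-concept>')
    # prompts containing <my-concept> now resolve to the learned embedding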
@ -1,102 +0,0 @@
import sys
from functools import partialmethod
from loguru import logger

STDOUT_LEVELS = ["GENERATION", "PROMPT"]
INIT_LEVELS = ["INIT", "INIT_OK", "INIT_WARN", "INIT_ERR"]
MESSAGE_LEVELS = ["MESSAGE"]
# By default we're at INFO level (20) or higher
verbosity = 20
quiet = 0


def set_logger_verbosity(count):
    global verbosity
    # The count comes reversed: count = 0 means minimum verbosity,
    # count = 5 means maximum verbosity. So the higher the count,
    # the lower we drop the verbosity threshold.
    verbosity = 20 - (count * 10)


def quiesce_logger(count):
    global quiet
    # The bigger the count, the more silent we want our logger
    quiet = count * 10


def is_stdout_log(record):
    if record["level"].name not in STDOUT_LEVELS:
        return False
    if record["level"].no < verbosity + quiet:
        return False
    return True


def is_init_log(record):
    if record["level"].name not in INIT_LEVELS:
        return False
    if record["level"].no < verbosity + quiet:
        return False
    return True


def is_msg_log(record):
    if record["level"].name not in MESSAGE_LEVELS:
        return False
    if record["level"].no < verbosity + quiet:
        return False
    return True


def is_stderr_log(record):
    if record["level"].name in STDOUT_LEVELS + INIT_LEVELS + MESSAGE_LEVELS:
        return False
    if record["level"].no < verbosity + quiet:
        return False
    return True


def test_logger():
    logger.generation("This is a generation message\nIt is typically multiline\nThree Lines".encode("unicode_escape").decode("utf-8"))
    logger.prompt("This is a prompt message")
    logger.debug("Debug Message")
    logger.info("Info Message")
    logger.warning("Warning Message")
    logger.error("Error Message")
    logger.critical("Critical Message")
    logger.init("This is an init message", status="Starting")
    logger.init_ok("This is an init message", status="OK")
    logger.init_warn("This is an init message", status="Warning")
    logger.init_err("This is an init message", status="Error")
    logger.message("This is a user message")
    sys.exit()


logfmt = "<level>{level: <10}</level> | <green>{time:YYYY-MM-DD HH:mm:ss}</green> | <green>{name}</green>:<green>{function}</green>:<green>{line}</green> - <level>{message}</level>"
genfmt = "<level>{level: <10}</level> @ <green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{message}</level>"
initfmt = "<magenta>INIT      </magenta> | <level>{extra[status]: <11}</level> | <magenta>{message}</magenta>"
msgfmt = "<level>{level: <10}</level> | <level>{message}</level>"

try:
    logger.level("GENERATION", no=24, color="<cyan>")
    logger.level("PROMPT", no=23, color="<yellow>")
    logger.level("INIT", no=31, color="<white>")
    logger.level("INIT_OK", no=31, color="<green>")
    logger.level("INIT_WARN", no=31, color="<yellow>")
    logger.level("INIT_ERR", no=31, color="<red>")
    # Messages contain important information without which this application might not be usable.
    # As such, they have the highest priority.
    logger.level("MESSAGE", no=61, color="<green>")
except TypeError:
    pass

logger.__class__.generation = partialmethod(logger.__class__.log, "GENERATION")
logger.__class__.prompt = partialmethod(logger.__class__.log, "PROMPT")
logger.__class__.init = partialmethod(logger.__class__.log, "INIT")
logger.__class__.init_ok = partialmethod(logger.__class__.log, "INIT_OK")
logger.__class__.init_warn = partialmethod(logger.__class__.log, "INIT_WARN")
logger.__class__.init_err = partialmethod(logger.__class__.log, "INIT_ERR")
logger.__class__.message = partialmethod(logger.__class__.log, "MESSAGE")

config = {
    "handlers": [
        {"sink": sys.stderr, "format": logfmt, "colorize": True, "filter": is_stderr_log},
        {"sink": sys.stdout, "format": genfmt, "level": "PROMPT", "colorize": True, "filter": is_stdout_log},
        {"sink": sys.stdout, "format": initfmt, "level": "INIT", "colorize": True, "filter": is_init_log},
        {"sink": sys.stdout, "format": msgfmt, "level": "MESSAGE", "colorize": True, "filter": is_msg_log}
    ],
}
logger.configure(**config)
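Once this module is imported, the custom levels registered above are used like the built-ins; for instance:

    logger.init("Model Reference", status="Downloading")
    logger.init_ok("Model Reference", status="OK")
    logger.generation("prompt: a watercolor fox\nseed: 42")
    set_logger_verbosity(2)  # verbosity = 20 - 2*10 = 0, i.e. let everything through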
@ -1,20 +0,0 @@
import os


def save_sample(image, filename, sample_path, extension='png', jpg_quality=95, webp_quality=95, webp_lossless=True, png_compression=9):
    path = os.path.join(sample_path, filename + '.' + extension)
    if os.path.exists(path):
        return False
    if not os.path.exists(sample_path):
        os.makedirs(sample_path)
    if extension == 'png':
        image.save(path, format='PNG', compress_level=png_compression)
    elif extension == 'jpg':
        image.save(path, quality=jpg_quality, optimize=True)
    elif extension == 'webp':
        image.save(path, quality=webp_quality, lossless=webp_lossless)
    else:
        return False
    return os.path.exists(path)
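Since save_sample() refuses to overwrite, it pairs naturally with get_next_sequence_number(); a sketch, with `image` assumed to be a PIL image:

    n = get_next_sequence_number('outputs/samples')
    ok = save_sample(image, f"{n:05}-my-prompt", 'outputs/samples', extension='webp')
    if not ok:
        print('file already existed or the extension is unsupported')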
@ -1,22 +0,0 @@
import random


def seed_to_int(s):
    if type(s) is int:
        return s
    if s is None or s == '':
        return random.randint(0, 2**32 - 1)

    if type(s) is list:
        # resolve each element individually so mixed lists of ints,
        # strings and blanks all come back as ints
        return [seed_to_int(seed) for seed in s]

    n = abs(int(s) if s.isdigit() else random.Random(s).randint(0, 2**32 - 1))
    while n >= 2**32:
        n = n >> 32
    return n
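Examples of the accepted seed forms:

    seed_to_int(1234)      # -> 1234
    seed_to_int('')        # -> a random 32-bit int
    seed_to_int('banana')  # -> a deterministic int derived from the string
    seed_to_int(['', 7])   # -> [<random int>, 7]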
34
scripts/post_processing.py
Normal file
@ -0,0 +1,34 @@
# This file is part of sygil-webui (https://github.com/Sygil-Dev/sandbox-webui/).

# Copyright 2022 Sygil-Dev team.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# base webui import and utils.
from sd_utils import *

# streamlit imports

# streamlit components section

# other imports
import os, time, requests
import sys

# Temp imports

# end of imports
# ---------------------------------------------------------------------------------------------------------------


def layout():
    st.info("Under Construction. :construction_worker:")
@ -14,17 +14,21 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# base webui import and utils.
from sd_utils import *
from sd_utils import st

# streamlit imports
import streamlit.components.v1 as components
# other imports

import os, math
from PIL import Image

# Temp imports
#from basicsr.utils.registry import ARCH_REGISTRY


class plugin_info():
    plugname = "concept_library"
    description = "Concept Library"
    displayPriority = 4

# end of imports
# ---------------------------------------------------------------------------------------------------------------

# Init Vuejs component
_component_func = components.declare_component(
2795
scripts/sd_utils/__init__.py
Normal file
182
scripts/sd_utils/bridge.py
Normal file
@ -0,0 +1,182 @@
# This file is part of sygil-webui (https://github.com/Sygil-Dev/sygil-webui/).

# Copyright 2022 Sygil-Dev team.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# base webui import and utils.
#import streamlit as st

# We import hydralit like this to replace the previous stuff
# we had with native streamlit, as it lets us replace things 1:1
from sd_utils import logger, load_models

# streamlit imports

# streamlit components section

# other imports
import requests, time, json, base64
from io import BytesIO

# import custom components

# end of imports
# ---------------------------------------------------------------------------------------------------------------


@logger.catch(reraise=True)
def run_bridge(interval, api_key, horde_name, horde_url, priority_usernames, horde_max_pixels, horde_nsfw, horde_censor_nsfw, horde_blacklist, horde_censorlist):
    current_id = None
    current_payload = None
    loop_retry = 0
    # Load the model for the Stable Horde if it's not in memory already.
    # Ideally we would load it after we get the request from the API, in
    # case the requested model differs from the one in memory, but for
    # now we load it here so it's ready right away.
    load_models(use_GFPGAN=True)
    while True:

        if loop_retry > 10 and current_id:
            logger.info(f"Exceeded retry count {loop_retry} for generation id {current_id}. Aborting generation!")
            current_id = None
            current_payload = None
            current_generation = None
            loop_retry = 0
        elif current_id:
            logger.info(f"Retrying ({loop_retry}/10) for generation id {current_id}...")
        gen_dict = {
            "name": horde_name,
            "max_pixels": horde_max_pixels,
            "priority_usernames": priority_usernames,
            "nsfw": horde_nsfw,
            "blacklist": horde_blacklist,
            "models": ["stable_diffusion"],
        }
        headers = {"apikey": api_key}
        if current_id:
            loop_retry += 1
        else:
            try:
                pop_req = requests.post(horde_url + '/api/v2/generate/pop', json=gen_dict, headers=headers)
            except requests.exceptions.ConnectionError:
                logger.warning(f"Server {horde_url} unavailable during pop. Waiting 10 seconds...")
                time.sleep(10)
                continue
            except requests.exceptions.JSONDecodeError:
                # except clauses take exception classes, not instances
                logger.warning(f"Server {horde_url} unavailable during pop. Waiting 10 seconds...")
                time.sleep(10)
                continue
            try:
                pop = pop_req.json()
            except json.decoder.JSONDecodeError:
                logger.warning(f"Could not decode response from {horde_url} as json. Please inform its administrator!")
                time.sleep(interval)
                continue
            if pop is None:
                logger.warning(f"Something has gone wrong with {horde_url}. Please inform its administrator!")
                time.sleep(interval)
                continue
            if not pop_req.ok:
                logger.warning(f"During gen pop, server {horde_url} responded with status code {pop_req.status_code}: {pop['message']}. Waiting for 10 seconds...")
                if 'errors' in pop:
                    logger.debug(f"Detailed Request Errors: {pop['errors']}")
                time.sleep(10)
                continue
            if not pop.get("id"):
                skipped_info = pop.get('skipped')
                if skipped_info and len(skipped_info):
                    skipped_info = f" Skipped Info: {skipped_info}."
                else:
                    skipped_info = ''
                logger.info(f"Server {horde_url} has no valid generations to do for us.{skipped_info}")
                time.sleep(interval)
                continue
            current_id = pop['id']
            logger.info(f"Request with id {current_id} picked up. Initiating work...")
            current_payload = pop['payload']
            if 'toggles' in current_payload and current_payload['toggles'] is None:
                logger.error(f"Received Bad payload: {pop}")
                current_id = None
                current_payload = None
                current_generation = None
                loop_retry = 0
                time.sleep(10)
                continue

        logger.debug(current_payload)
        current_payload['toggles'] = current_payload.get('toggles', [1, 4])
        # In bridge-mode, matrix is prepared on the horde and split in multiple nodes
        if 0 in current_payload['toggles']:
            current_payload['toggles'].remove(0)
        if 8 not in current_payload['toggles']:
            if horde_censor_nsfw and not horde_nsfw:
                current_payload['toggles'].append(8)
            elif any(word in current_payload['prompt'] for word in horde_censorlist):
                current_payload['toggles'].append(8)

        from txt2img import txt2img

        # Example payload shape:
        # {'prompt': 'Centred Husky, inside spiral with circular patterns, trending on dribbble, knotwork, spirals, key patterns,
        # zoomorphics, ', 'ddim_steps': 30, 'n_iter': 1, 'sampler_name': 'DDIM', 'cfg_scale': 16.0, 'seed': '3405278433', 'height': 512, 'width': 512}

        #images, seed, info, stats = txt2img(**current_payload)
        images, seed, info, stats = txt2img(str(current_payload['prompt']), int(current_payload['ddim_steps']), str(current_payload['sampler_name']),
                                            int(current_payload['n_iter']), 1, float(current_payload["cfg_scale"]), str(current_payload["seed"]),
                                            int(current_payload["height"]), int(current_payload["width"]), save_grid=False, group_by_prompt=False,
                                            save_individual_images=False, write_info_files=False)

        buffer = BytesIO()
        # We send as WebP to avoid using all the horde bandwidth
        images[0].save(buffer, format="WebP", quality=90)
        # logger.info(info)
        submit_dict = {
            "id": current_id,
            "generation": base64.b64encode(buffer.getvalue()).decode("utf8"),
            "api_key": api_key,
            "seed": seed,
            "max_pixels": horde_max_pixels,
        }
        current_generation = seed
        while current_id and current_generation is not None:
            try:
                submit_req = requests.post(horde_url + '/api/v2/generate/submit', json=submit_dict, headers=headers)
                try:
                    submit = submit_req.json()
                except json.decoder.JSONDecodeError:
                    logger.error(f"Something has gone wrong with {horde_url} during submit. Please inform its administrator! (Retry {loop_retry}/10)")
                    time.sleep(interval)
                    continue
                if submit_req.status_code == 404:
                    logger.info("The generation we were working on got stale. Aborting!")
                elif not submit_req.ok:
                    logger.error(f"During gen submit, server {horde_url} responded with status code {submit_req.status_code}: {submit['message']}. Waiting for 10 seconds... (Retry {loop_retry}/10)")
                    if 'errors' in submit:
                        logger.debug(f"Detailed Request Errors: {submit['errors']}")
                    time.sleep(10)
                    continue
                else:
                    logger.info(f'Submitted generation with id {current_id} and contributed for {submit_req.json()["reward"]}')
                current_id = None
                current_payload = None
                current_generation = None
                loop_retry = 0
            except requests.exceptions.ConnectionError:
                logger.warning(f"Server {horde_url} unavailable during submit. Waiting 10 seconds... (Retry {loop_retry}/10)")
                time.sleep(10)
                continue
        time.sleep(interval)
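A hypothetical invocation sketch; in the real worker these values come from bridgeData.py / the CLI. The URL is the public horde mentioned elsewhere in this PR, and "0000000000" is the anonymous key:

    run_bridge(interval=1, api_key='0000000000', horde_name='my_worker',
               horde_url='https://stablehorde.net', priority_usernames=[],
               horde_max_pixels=512*512, horde_nsfw=False, horde_censor_nsfw=True,
               horde_blacklist=[], horde_censorlist=[])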
@ -75,15 +75,9 @@ from pathlib import Path
from huggingface_hub import hf_hub_download

#import librosa
#from logger import logger, set_logger_verbosity, quiesce_logger
from logger import logger, set_logger_verbosity, quiesce_logger
#from loguru import logger

#from nataili.inference.compvis.img2img import img2img
#from nataili.model_manager import ModelManager
#from nataili.inference.compvis.txt2img import txt2img
from nataili.util.cache import torch_gc
from nataili.util.logger import logger, set_logger_verbosity, quiesce_logger

try:
    from realesrgan import RealESRGANer
    from basicsr.archs.rrdbnet_arch import RRDBNet
@ -468,7 +462,6 @@ def load_models(use_LDSR = False, LDSR_model='model', use_GFPGAN=False, GFPGAN_m
    try:
        server_state["model"].args.use_multiprocessing_for_evaluation = False
    except AttributeError as e:
        logger.error(e)
        pass

    if st.session_state.defaults.general.enable_attention_slicing:
@ -2017,6 +2010,8 @@ def GFPGAN_available():
        st.session_state["GFPGAN_available"] = True
    else:
        st.session_state["GFPGAN_available"] = False
        st.session_state["use_GFPGAN"] = False
        st.session_state["GFPGAN_model"] = "GFPGANv1.4"

#
def RealESRGAN_available():
@ -2035,6 +2030,8 @@ def RealESRGAN_available():
        st.session_state["RealESRGAN_available"] = True
    else:
        st.session_state["RealESRGAN_available"] = False
        st.session_state["use_RealESRGAN"] = False
        st.session_state["RealESRGAN_model"] = "RealESRGAN_x4plus"
#
def LDSR_available():
    #with server_state_lock["RealESRGAN_models"]:
@ -2055,6 +2052,8 @@ def LDSR_available():
        st.session_state["LDSR_available"] = True
    else:
        st.session_state["LDSR_available"] = False
        st.session_state["use_LDSR"] = False
        st.session_state["LDSR_model"] = "model"
@ -14,21 +14,23 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# base webui import and utils.
from sd_utils import *
from sd_utils import st, set_page_title, seed_to_int

# streamlit imports
from streamlit import StopException
from streamlit.runtime.scriptrunner import StopException
from streamlit_tensorboard import st_tensorboard

#streamlit components section
from streamlit_server_state import server_state

#other imports
from transformers import CLIPTextModel, CLIPTokenizer

# Temp imports

import argparse
import itertools
import math
import os, sys
import os
import random
#import datetime
#from pathlib import Path
@ -41,14 +43,13 @@ import torch.utils.checkpoint
from torch.utils.data import Dataset

import PIL
from accelerate import Accelerator, tracking
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from diffusers import AutoencoderKL, DDPMScheduler, LMSDiscreteScheduler, StableDiffusionPipeline, UNet2DConditionModel#, PNDMScheduler
from diffusers.optimization import get_scheduler
#from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from pipelines.stable_diffusion.no_check import NoCheck
from huggingface_hub import HfFolder, whoami#, Repository
from PIL import Image
from torchvision import transforms
from tqdm.auto import tqdm
@ -57,8 +58,6 @@ from slugify import slugify
import json
import os#, subprocess
#from io import StringIO
#import sys
from torch.utils.tensorboard import SummaryWriter


# end of imports
@ -14,20 +14,38 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# base webui import and utils.
from sd_utils import *
from sd_utils import st, MemUsageMonitor, server_state, \
     get_next_sequence_number, check_prompt_length, torch_gc, \
     save_sample, generation_callback, process_images, \
     KDiffusionSampler, \
     custom_models_available, RealESRGAN_available, GFPGAN_available, \
     LDSR_available, load_models, hc, seed_to_int, logger

# streamlit imports
from streamlit import StopException
from streamlit.runtime.scriptrunner import StopException

#streamlit components section
import streamlit_nested_layout  # used to allow nested columns; just importing it is enough

#from streamlit.elements import image as STImage
import streamlit.components.v1 as components
from streamlit.runtime.media_file_manager import media_file_manager
#from streamlit.runtime.media_file_manager import media_file_manager
from streamlit.elements.image import image_to_url

#other imports
import uuid

import base64, uuid
import os, sys, datetime, time
from PIL import Image
import requests
from slugify import slugify
from ldm.models.diffusion.ddim import DDIMSampler
from typing import Union
from io import BytesIO
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler


# streamlit components
from custom_components import sygil_suggestions

@ -106,7 +124,7 @@ def stable_horde(outpath, prompt, seed, sampler_name, save_grid, batch_size,

    log.append("Generating image with Stable Horde.")

    st.session_state["progress_bar_text"].code('\n'.join(str(log)), language='')
    st.session_state["progress_bar_text"].code('\n'.join(log), language='')

    # start time after garbage collection (or before?)
    start_time = time.time()
@ -157,7 +175,7 @@ def stable_horde(outpath, prompt, seed, sampler_name, save_grid, batch_size,
    logger.debug(submit_results)

    log.append(submit_results)
    st.session_state["progress_bar_text"].code('\n'.join(str(log)), language='')
    st.session_state["progress_bar_text"].code(''.join(str(log)), language='')

    req_id = submit_results['id']
    is_done = False
@ -282,7 +300,7 @@ def txt2img(prompt: str, ddim_steps: int, sampler_name: str, n_iter: int, batch_
            RealESRGAN_model: str = "RealESRGAN_x4plus_anime_6B", use_LDSR: bool = True, LDSR_model: str = "model",
            fp = None, variant_amount: float = 0.0,
            variant_seed: int = None, ddim_eta: float = 0.0, write_info_files: bool = True,
            use_stable_horde: bool = False, stable_horde_key: str = ''):
            use_stable_horde: bool = False, stable_horde_key: str = "0000000000"):

    outpath = st.session_state['defaults'].general.outdir_txt2img

@ -475,7 +493,7 @@ def layout():

    with gallery_tab:
        st.session_state["gallery"] = st.empty()
        st.session_state["gallery"].info("Nothing but crickets here, try generating something first.")
        #st.session_state["gallery"].info("Nothing but crickets here, try generating something first.")

    with col3:
        # If we have custom models available on the "models/custom"
@ -502,7 +520,7 @@ def layout():
    with st.expander("Advanced"):
        with st.expander("Stable Horde"):
            use_stable_horde = st.checkbox("Use Stable Horde", value=False, help="Use the Stable Horde to generate images. More info can be found at https://stablehorde.net/")
            stable_horde_key = st.text_input("Stable Horde Api Key", value='', type="password",
            stable_horde_key = st.text_input("Stable Horde Api Key", value=st.session_state['defaults'].general.stable_horde_api, type="password",
                                             help="Optional Api Key used for the Stable Horde Bridge, if no api key is added the horde will be used anonymously.")

    with st.expander("Output Settings"):
@ -570,7 +588,9 @@ def layout():

    #print (st.session_state["RealESRGAN_available"])
    st.session_state["upscaling_method"] = st.selectbox("Upscaling Method", upscaling_method_list,
                                                        index=upscaling_method_list.index(str(st.session_state['defaults'].general.upscaling_method)))
                                                        index=upscaling_method_list.index(st.session_state['defaults'].general.upscaling_method)
                                                        if st.session_state['defaults'].general.upscaling_method in upscaling_method_list
                                                        else 0)

    if st.session_state["RealESRGAN_available"]:
        with st.expander("RealESRGAN"):
@ -654,42 +674,9 @@ def layout():

    message.success('Render Complete: ' + info + '; Stats: ' + stats, icon="✅")

    #history_tab,col1,col2,col3,PlaceHolder,col1_cont,col2_cont,col3_cont = st.session_state['historyTab']

    #if 'latestImages' in st.session_state:
        #for i in output_images:
            ##push the new image to the list of latest images and remove the oldest one
            ##remove the last index from the list\
            #st.session_state['latestImages'].pop()
            ##add the new image to the start of the list
            #st.session_state['latestImages'].insert(0, i)
        #PlaceHolder.empty()
        #with PlaceHolder.container():
            #col1, col2, col3 = st.columns(3)
            #col1_cont = st.container()
            #col2_cont = st.container()
            #col3_cont = st.container()
            #images = st.session_state['latestImages']
            #with col1_cont:
                #with col1:
                    #[st.image(images[index]) for index in [0, 3, 6] if index < len(images)]
            #with col2_cont:
                #with col2:
                    #[st.image(images[index]) for index in [1, 4, 7] if index < len(images)]
            #with col3_cont:
                #with col3:
                    #[st.image(images[index]) for index in [2, 5, 8] if index < len(images)]
            #historyGallery = st.empty()

        ## check if output_images length is the same as seeds length
        #with gallery_tab:
            #st.markdown(createHTMLGallery(output_images,seeds), unsafe_allow_html=True)


        #st.session_state['historyTab'] = [history_tab,col1,col2,col3,PlaceHolder,col1_cont,col2_cont,col3_cont]

    with gallery_tab:
        logger.info(seeds)
        st.session_state["gallery"].text = ""
        sdGallery(output_images)
|
@ -21,25 +21,32 @@ https://github.com/nateraw/stable-diffusion-videos
|
||||
repo and the original gist script from
|
||||
https://gist.github.com/karpathy/00103b0037c5aaea32fe1da1af553355
|
||||
"""
|
||||
from sd_utils import *
|
||||
from sd_utils import st, MemUsageMonitor, server_state, torch_gc, \
|
||||
custom_models_available, RealESRGAN_available, GFPGAN_available, \
|
||||
LDSR_available, hc, seed_to_int, logger, slerp, optimize_update_preview_frequency, \
|
||||
load_learned_embed_in_clip, load_GFPGAN, RealESRGANModel
|
||||
|
||||
|
||||
# streamlit imports
|
||||
from streamlit import StopException
|
||||
from streamlit.elements import image as STImage
|
||||
from streamlit.runtime.scriptrunner import StopException
|
||||
#from streamlit.elements import image as STImage
|
||||
|
||||
#streamlit components section
|
||||
from streamlit_server_state import server_state, server_state_lock
|
||||
#from streamlitextras.threader import lock, trigger_rerun, \
|
||||
#streamlit_thread, get_thread, \
|
||||
#last_trigger_time
|
||||
|
||||
#other imports
|
||||
|
||||
import os, sys, json
|
||||
import os, sys, json, re, random, datetime, time, warnings, mimetypes
|
||||
from PIL import Image
|
||||
import torch
|
||||
import numpy as np
|
||||
import time, inspect, timeit
|
||||
import torch
|
||||
from torch import autocast
|
||||
from io import BytesIO
|
||||
#from io import BytesIO
|
||||
import imageio
|
||||
from slugify import slugify
|
||||
|
||||
@ -58,9 +65,13 @@ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
|
||||
from typing import Callable, List, Optional, Union
|
||||
from pathlib import Path
|
||||
from torchvision.transforms.functional import pil_to_tensor
|
||||
from torchvision import transforms
|
||||
import librosa
|
||||
from PIL import Image
|
||||
from torchvision.io import write_video
|
||||
from torchvision import transforms
|
||||
import torch.nn as nn
|
||||
from uuid import uuid4
|
||||
|
||||
|
||||
# streamlit components
|
||||
@ -81,6 +92,14 @@ try:
|
||||
except:
|
||||
pass
|
||||
|
||||
# remove some annoying deprecation warnings that show every now and then.
|
||||
warnings.filterwarnings("ignore", category=DeprecationWarning)
|
||||
warnings.filterwarnings("ignore", category=UserWarning)
|
||||
|
||||
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the bowser will not show any UI
|
||||
mimetypes.init()
|
||||
mimetypes.add_type('application/javascript', '.js')
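These two lines work around broken Windows registries where `.js` is mapped to the wrong content type, which makes the browser refuse to execute Streamlit's UI scripts. A self-contained check of what the fix does, standard library only:

```python
import mimetypes

# Re-read the system MIME tables, then force the correct type for .js so
# Streamlit serves its bundled scripts as JavaScript rather than text/html.
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')

print(mimetypes.guess_type('frontend/js/main.js'))
# expected: ('application/javascript', None)
```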

class plugin_info():
plugname = "txt2vid"
description = "Text to Image"
@ -134,7 +153,7 @@ def get_timesteps_arr(audio_filepath, offset, duration, fps=30, margin=1.0, smoo
#
def make_video_pyav(
frames_or_frame_dir: Union[str, Path, torch.Tensor],
audio_filepath: Union[str, Path] = None,
fps: int = 30,
audio_offset: int = 0,
audio_duration: int = 2,
@ -169,14 +188,14 @@ def make_video_pyav(
audio_tensor = torch.tensor(audio).unsqueeze(0)

write_video(
output_filepath,
frames,
fps=fps,
audio_array=audio_tensor,
audio_fps=sr,
audio_codec="aac",
options={"crf": "10", "pix_fmt": "yuv420p"},
)
else:
write_video(output_filepath, frames, fps=fps, options={"crf": "10", "pix_fmt": "yuv420p"})
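For reference, `write_video` here is `torchvision.io.write_video`, which expects a uint8 frame tensor shaped [T, H, W, C]. A minimal no-audio call mirroring the fallback branch above; the file name and dummy frames are illustrative:

```python
import torch
from torchvision.io import write_video

# 30 random 64x64 RGB frames standing in for the rendered interpolation.
frames = torch.randint(0, 256, (30, 64, 64, 3), dtype=torch.uint8)

write_video(
    "demo.mp4",  # hypothetical output path
    frames,
    fps=30,
    options={"crf": "10", "pix_fmt": "yuv420p"},  # same encoder options as above
)
```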

@ -210,8 +229,8 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
"""

def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
@ -223,27 +242,27 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):

if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
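The block above is the standard diffusers shim for outdated scheduler configs: a scheduler's config is an immutable `FrozenDict`, so the code copies it into a plain dict, patches the field, and re-freezes it. Condensed into a standalone sketch with a hypothetical helper name:

```python
from diffusers.configuration_utils import FrozenDict

def patch_steps_offset(scheduler):
    # Copy the frozen config, fix the outdated field, and swap the frozen
    # mapping back in, exactly as the pipeline constructor does above.
    if getattr(scheduler.config, "steps_offset", 1) != 1:
        new_config = dict(scheduler.config)
        new_config["steps_offset"] = 1
        scheduler._internal_dict = FrozenDict(new_config)
    return scheduler
```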

self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)

def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
r"""
@ -272,8 +291,8 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):

@torch.no_grad()
def __call__(
self,
prompt: Optional[Union[str, List[str]]] = None,
height: int = 512,
width: int = 512,
num_inference_steps: int = 50,
@ -351,12 +370,12 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)

if text_embeddings is None:
if isinstance(prompt, str):
@ -368,19 +387,18 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):

# get prompt text embeddings
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids

if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
print(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
print("The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
else:
@ -402,28 +420,28 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
uncond_tokens = [""]
elif type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt

max_length = self.tokenizer.model_max_length
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
@ -447,8 +465,8 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
if self.device.type == "mps":
# randn does not exist on mps
latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
self.device
)
else:
latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
else:
@ -506,11 +524,11 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):

if self.safety_checker is not None:
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
self.device
)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
)
else:
has_nsfw_concept = None

@ -547,8 +565,8 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
embeds_batch, noise_batch = None, None

def make_clip_frames(
self,
prompt_a: str,
prompt_b: str,
seed_a: int,
seed_b: int,
@ -580,21 +598,21 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
self.upsampler.to(self.device)

batch_generator = self.generate_inputs(
prompt_a,
prompt_b,
seed_a,
seed_b,
(1, self.unet.in_channels, height // 8, width // 8),
T[skip:],
batch_size,
)

frame_index = skip
for _, embeds_batch, noise_batch in batch_generator:
with torch.autocast("cuda"):
outputs = self(
latents=noise_batch,
text_embeddings=embeds_batch,
height=height,
width=width,
guidance_scale=guidance_scale,
@ -612,8 +630,8 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
frame_index += 1

def walk(
self,
prompts: Optional[List[str]] = None,
prompt: Optional[List[str]] = None,
seeds: Optional[List[int]] = None,
num_interpolation_steps: Optional[Union[int, List[int]]] = 5, # int or list of int
output_dir: Optional[str] = "./dreams",
@ -722,12 +740,12 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
str: The resulting video filepath. This video includes all sub directories' video clips.
"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)

# init the output dir
if type(prompts) == str:
@ -757,10 +775,10 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
prompt_config_path = Path(os.path.join(full_path, "prompt_config.json"))
if not resume:
prompt_config_path.write_text(
json.dumps(
dict(
prompts=prompts,
seeds=seeds,
num_interpolation_steps=num_interpolation_steps,
fps=fps,
num_inference_steps=num_inference_steps,
@ -773,10 +791,10 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
audio_start_sec=audio_start_sec,
),

indent=2,
sort_keys=False,
)
)
else:
data = json.load(open(prompt_config_path))
prompts = data["prompts"]
@ -793,8 +811,8 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
audio_start_sec = data["audio_start_sec"]

for i, (prompt_a, prompt_b, seed_a, seed_b, num_step) in enumerate(
zip(prompts, prompts[1:], seeds, seeds[1:], num_interpolation_steps)
):
# {name}_000000 / {name}_000001 / ...
save_path = Path(f"{full_path}/{name}_{i:06d}")

@ -820,8 +838,8 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
audio_duration = num_step / fps

self.make_clip_frames(
prompt_a,
prompt_b,
seed_a,
seed_b,
num_interpolation_steps=num_step,
@ -836,8 +854,8 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
skip=skip,
T=get_timesteps_arr(
audio_filepath,
offset=audio_offset,
duration=audio_duration,
fps=fps,
margin=margin,
smooth=smooth,
@ -846,39 +864,39 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
)
if audio_filepath
else None,
)
make_video_pyav(
save_path,
audio_filepath=audio_filepath,
fps=fps,
output_filepath=step_output_filepath,
glob_pattern=f"*{image_file_ext}",
audio_offset=audio_offset,
audio_duration=audio_duration,
sr=44100,
)

return make_video_pyav(
full_path,
audio_filepath=audio_filepath,
fps=fps,
audio_offset=audio_start_sec,
audio_duration=sum(num_interpolation_steps) / fps,
output_filepath=output_filepath,
glob_pattern=f"**/*{image_file_ext}",
sr=44100,
)

def embed_text(self, text):
"""Helper to embed some text"""
with torch.autocast("cuda"):
text_input = self.tokenizer(
text,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
with torch.no_grad():
embed = self.text_encoder(text_input.input_ids.to(self.device))[0]
return embed
@ -888,16 +906,16 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
# randn does not exist on mps, so we create noise on CPU here and move it to the device after initialization
if self.device.type == "mps":
noise = torch.randn(
noise_shape,
device='cpu',
generator=torch.Generator(device='cpu').manual_seed(seed),
).to(self.device)
else:
noise = torch.randn(
noise_shape,
device=self.device,
generator=torch.Generator(device=self.device).manual_seed(seed),
)
return noise
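This helper is what makes the walk reproducible: the same seed always yields the same starting latents, and on Apple's `mps` backend the tensor has to be sampled on CPU first. A standalone version with an assumed [1, 4, 64, 64] latent shape:

```python
import torch

def seeded_noise(seed: int, shape=(1, 4, 64, 64), device="cpu"):
    # Seeded generators for randn are not supported directly on mps,
    # so sample on CPU and move the result; elsewhere sample in place.
    if device == "mps":
        gen = torch.Generator(device="cpu").manual_seed(seed)
        return torch.randn(shape, device="cpu", generator=gen).to(device)
    gen = torch.Generator(device=device).manual_seed(seed)
    return torch.randn(shape, device=device, generator=gen)

# Same seed, same latents: the property the interpolation relies on.
assert torch.equal(seeded_noise(42), seeded_noise(42))
```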

@classmethod
@ -923,7 +941,7 @@ class StableDiffusionWalkPipeline(DiffusionPipeline):
@torch.no_grad()
def diffuse(
pipe,
cond_embeddings, # text conditioning, should be (1, 77, 768)
cond_latents, # image conditioning, should be (1, 4, 64, 64)
num_inference_steps,
cfg_scale,
@ -1011,7 +1029,7 @@ def diffuse(
st.session_state["previous_chunk_speed_list"],
st.session_state["update_preview_frequency"],
st.session_state["avg_update_preview_frequency"] = optimize_update_preview_frequency(st.session_state["current_chunk_speed"],
st.session_state["previous_chunk_speed_list"],
st.session_state["update_preview_frequency"],
st.session_state["update_preview_frequency_list"])

@ -1049,18 +1067,18 @@ def diffuse(
inference_progress = ""

total_percent = int(100 * float(i+1 if i+1 < (num_inference_steps + st.session_state.sampling_steps)
else (num_inference_steps + st.session_state.sampling_steps))/float((num_inference_steps + st.session_state.sampling_steps)))

percent = int(100 * float(i+1 if i+1 < num_inference_steps else st.session_state.sampling_steps)/float(st.session_state.sampling_steps))
frames_percent = int(100 * float(st.session_state.current_frame if st.session_state.current_frame < total_frames else total_frames)/float(total_frames))

if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].text(
f"Running step: {i+1 if i+1 < st.session_state.sampling_steps else st.session_state.sampling_steps}/{st.session_state.sampling_steps} "
f"{percent if percent < 100 else 100}% {inference_progress}{duration:.2f}{speed} | "
f"Frame: {st.session_state.current_frame + 1 if st.session_state.current_frame < total_frames else total_frames}/{total_frames} "
f"{frames_percent if frames_percent < 100 else 100}% {st.session_state.frame_duration:.2f}{st.session_state.frame_speed}"
)

if "progress_bar" in st.session_state:
st.session_state["progress_bar"].progress(total_percent if total_percent < 100 else 100)
@ -1108,26 +1126,27 @@ def load_diffusers_model(weights_path,torch_device):
model_path = os.path.join("models", "diffusers", "stable-diffusion-v1-5")

if not os.path.exists(model_path + "/model_index.json"):
server_state["pipe"] = StableDiffusionWalkPipeline.from_pretrained(
server_state["pipe"] = StableDiffusionPipeline.from_pretrained(
weights_path,
use_local_file=True,
use_auth_token=st.session_state["defaults"].general.huggingface_token,
torch_dtype=torch.float16 if st.session_state['defaults'].general.use_float16 else None,
revision="fp16" if not st.session_state['defaults'].general.no_half else None,
safety_checker=None, # Very important for videos...lots of false positives while interpolating
#custom_pipeline="interpolate_stable_diffusion",
)

StableDiffusionWalkPipeline.save_pretrained(server_state["pipe"], model_path)
StableDiffusionPipeline.save_pretrained(server_state["pipe"], model_path)
else:
server_state["pipe"] = StableDiffusionWalkPipeline.from_pretrained(
server_state["pipe"] = StableDiffusionPipeline.from_pretrained(
model_path,
use_local_file=True,
torch_dtype=torch.float16 if st.session_state['defaults'].general.use_float16 else None,
revision="fp16" if not st.session_state['defaults'].general.no_half else None,
safety_checker=None, # Very important for videos...lots of false positives while interpolating
#custom_pipeline="interpolate_stable_diffusion",
)

server_state["pipe"].unet.to(torch_device)
server_state["pipe"].vae.to(torch_device)
@ -1143,7 +1162,7 @@ def load_diffusers_model(weights_path,torch_device):
else:
# if the float16 or no_half options have changed since the last time the model was loaded then we need to reload the model.
if ("float16" in server_state and server_state['float16'] != st.session_state['defaults'].general.use_float16) \
or ("no_half" in server_state and server_state['no_half'] != st.session_state['defaults'].general.no_half) \
or ("optimized" in server_state and server_state['optimized'] != st.session_state['defaults'].general.optimized):

del server_state['float16']
@ -1166,9 +1185,11 @@ def load_diffusers_model(weights_path,torch_device):
if "huggingface_token" not in st.session_state or st.session_state["defaults"].general.huggingface_token == "None":
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].error(
"You need a huggingface token in order to use the Text to Video tab. Use the Settings page from the sidebar on the left to add your token."
)
raise OSError("You need a huggingface token in order to use the Text to Video tab. Use the Settings page from the sidebar on the left to add your token.")
"You need a huggingface token in order to use the Text to Video tab. Use the Settings page to add your token under the Huggingface section. "
"Make sure you save your settings after adding it."
)
raise OSError("You need a huggingface token in order to use the Text to Video tab. Use the Settings page to add your token under the Huggingface section. "
"Make sure you save your settings after adding it.")
else:
if "progress_bar_text" in st.session_state:
st.session_state["progress_bar_text"].error(e)
@ -1193,7 +1214,7 @@ def save_video_to_disk(frames, seeds, sanitized_prompt, fps=6,save_video=True, o
#
def txt2vid(
# --------------------------------------
# args you probably want to change
prompts = ["blueberry spaghetti", "strawberry spaghetti"], # prompt to dream about
gpu:int = st.session_state['defaults'].general.gpu, # id of the gpu to run on
#name:str = 'test', # name of this project, for the output directory
@ -1294,10 +1315,10 @@ def txt2vid(
if st.session_state.write_info_files:
with open(os.path.join(full_path , f'{slugify(str(seeds))}_config.json' if len(prompts) > 1 else "prompts_config.json"), "w") as outfile:
outfile.write(json.dumps(
dict(
prompts = prompts,
gpu = gpu,
num_steps = num_steps,
max_duration_in_seconds = max_duration_in_seconds,
num_inference_steps = num_inference_steps,
cfg_scale = cfg_scale,
@ -1315,27 +1336,27 @@ def txt2vid(
beta_end = beta_end,
beta_schedule = beta_schedule
),
indent=2,
sort_keys=False,
))

#print(scheduler)
default_scheduler = PNDMScheduler(
beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule
)
# ------------------------------------------------------------------------------
#Schedulers
ddim_scheduler = DDIMScheduler(
beta_start=beta_start,
beta_end=beta_end,
beta_schedule=beta_schedule,
clip_sample=False,
set_alpha_to_one=False,
)

klms_scheduler = LMSDiscreteScheduler(
beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule
)

SCHEDULERS = dict(default=default_scheduler, ddim=ddim_scheduler, klms=klms_scheduler)

@ -1415,48 +1436,49 @@ def txt2vid(
# preview image works but its not the right way to use this, this also do not work properly as it only makes one image and then exits.
#with torch.autocast("cuda"):
#StableDiffusionWalkPipeline.__call__(self=server_state["pipe"],
#prompt=prompts, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=cfg_scale,
#negative_prompt="", num_images_per_prompt=1, eta=0.0,
#callback=txt2vid_generation_callback, callback_steps=1,
#num_interpolation_steps=num_steps,
#fps=30,
#image_file_ext = ".png",
#output_dir=full_path, # Where images/videos will be saved
##name='animals_test', # Subdirectory of output_dir where images/videos will be saved
#upsample = False,
##do_loop=do_loop, # Change to True if you want last prompt to loop back to first prompt
#resume = False,
#audio_filepath = None,
#audio_start_sec = None,
#margin = 1.0,
#smooth = 0.0, )

# works correctly generating all frames but do not show the preview image
# we also do not have control over the generation and cant stop it until the end of it.
#with torch.autocast("cuda"):
#print (prompts)
#video_path = server_state["pipe"].walk(
#prompts=prompts,
#prompt=prompts,
#seeds=seeds,
#num_interpolation_steps=num_steps,
#height=height, # use multiples of 64 if > 512. Multiples of 8 if < 512.
#width=width, # use multiples of 64 if > 512. Multiples of 8 if < 512.
#batch_size=4,
#fps=30,
#image_file_ext = ".png",
#eta = 0.0,
#output_dir=full_path, # Where images/videos will be saved
##name='test', # Subdirectory of output_dir where images/videos will be saved
#guidance_scale=cfg_scale, # Higher adheres to prompt more, lower lets model take the wheel
#num_inference_steps=num_inference_steps, # Number of diffusion steps per image generated. 50 is good default
#upsample = False,
##do_loop=do_loop, # Change to True if you want last prompt to loop back to first prompt
#resume = False,
#audio_filepath = None,
#audio_start_sec = None,
#margin = 1.0,
#smooth = 0.0,
#callback=txt2vid_generation_callback, # our callback function will be called with the arguments callback(step, timestep, latents)
#callback_steps=1 # our callback function will be called once this many steps are processed in a single frame
#)

# old code
@ -1560,11 +1582,11 @@ def txt2vid(
time_diff = time.time()- start

info = f"""
{prompts}
Sampling Steps: {num_steps}, Sampler: {scheduler}, CFG scale: {cfg_scale}, Seed: {seeds}, Max Duration In Seconds: {max_duration_in_seconds}""".strip()
stats = f'''
Took { round(time_diff, 2) }s total ({ round(time_diff/(max_duration_in_seconds),2) }s per image)
Peak memory usage: { -(mem_max_used // -1_048_576) } MiB / { -(mem_total // -1_048_576) } MiB / { round(mem_max_used/mem_total*100, 3) }%'''

return video_path, seeds, info, stats

@ -1590,16 +1612,16 @@ def layout():

with col1:
width = st.slider("Width:", min_value=st.session_state['defaults'].txt2vid.width.min_value, max_value=st.session_state['defaults'].txt2vid.width.max_value,
value=st.session_state['defaults'].txt2vid.width.value, step=st.session_state['defaults'].txt2vid.width.step)
height = st.slider("Height:", min_value=st.session_state['defaults'].txt2vid.height.min_value, max_value=st.session_state['defaults'].txt2vid.height.max_value,
value=st.session_state['defaults'].txt2vid.height.value, step=st.session_state['defaults'].txt2vid.height.step)
cfg_scale = st.number_input("CFG (Classifier Free Guidance Scale):", min_value=st.session_state['defaults'].txt2vid.cfg_scale.min_value,
value=st.session_state['defaults'].txt2vid.cfg_scale.value,
step=st.session_state['defaults'].txt2vid.cfg_scale.step,
help="How strongly the image should follow the prompt.")

#uploaded_images = st.file_uploader("Upload Image", accept_multiple_files=False, type=["png", "jpg", "jpeg", "webp"],
#help="Upload an image which will be used for the image to image generation.")
seed = st.text_input("Seed:", value=st.session_state['defaults'].txt2vid.seed, help=" The seed to use, if left blank a random seed will be generated.")
#batch_count = st.slider("Batch count.", min_value=1, max_value=100, value=st.session_state['defaults'].txt2vid.batch_count,
# step=1, help="How many iterations or batches of images to generate in total.")
@ -1609,23 +1631,23 @@ def layout():
#Default: 1")

st.session_state["max_duration_in_seconds"] = st.number_input("Max Duration In Seconds:", value=st.session_state['defaults'].txt2vid.max_duration_in_seconds,
help="Specify the max duration in seconds you want your video to be.")

with st.expander("Preview Settings"):
#st.session_state["update_preview"] = st.checkbox("Update Image Preview", value=st.session_state['defaults'].txt2vid.update_preview,
#help="If enabled the image preview will be updated during the generation instead of at the end. \
#You can use the Update Preview \Frequency option bellow to customize how frequent it's updated. \
#By default this is enabled and the frequency is set to 1 step.")

st.session_state["update_preview"] = st.session_state["defaults"].general.update_preview
st.session_state["update_preview_frequency"] = st.number_input("Update Image Preview Frequency",
min_value=0,
value=st.session_state['defaults'].txt2vid.update_preview_frequency,
help="Frequency in steps at which the the preview image is updated. By default the frequency \
is set to 1 step.")

st.session_state["dynamic_preview_frequency"] = st.checkbox("Dynamic Preview Frequency", value=st.session_state['defaults'].txt2vid.dynamic_preview_frequency,
help="This option tries to find the best value at which we can update \
the preview image during generation while minimizing the impact it has in performance. Default: True")


@ -1666,7 +1688,7 @@ def layout():
custom_models_available()
if server_state["CustomModel_available"]:
custom_model = st.selectbox("Custom Model:", st.session_state["defaults"].txt2vid.custom_models_list,
index=st.session_state["defaults"].txt2vid.custom_models_list.index(st.session_state["defaults"].txt2vid.default_model),
help="Select the model you want to use. This option is only available if you have custom models \
on your 'models/custom' folder. The model name that will be shown here is the same as the name\
the file for the model has on said folder, it is recommended to give the .ckpt file a name that \
@ -1680,11 +1702,11 @@ def layout():
#st.session_state["weights_path"] = f"CompVis/{slugify(custom_model.lower())}"

st.session_state.sampling_steps = st.number_input("Sampling Steps", value=st.session_state['defaults'].txt2vid.sampling_steps.value,
min_value=st.session_state['defaults'].txt2vid.sampling_steps.min_value,
step=st.session_state['defaults'].txt2vid.sampling_steps.step, help="Number of steps between each pair of sampled points")

st.session_state.num_inference_steps = st.number_input("Inference Steps:", value=st.session_state['defaults'].txt2vid.num_inference_steps.value,
min_value=st.session_state['defaults'].txt2vid.num_inference_steps.min_value,
step=st.session_state['defaults'].txt2vid.num_inference_steps.step,
help="Higher values (e.g. 100, 200 etc) can create better images.")

@ -1693,11 +1715,11 @@ def layout():
#index=sampler_name_list.index(st.session_state['defaults'].txt2vid.default_sampler), help="Sampling method to use. Default: k_euler")
scheduler_name_list = ["klms", "ddim"]
scheduler_name = st.selectbox("Scheduler:", scheduler_name_list,
index=scheduler_name_list.index(st.session_state['defaults'].txt2vid.scheduler_name), help="Scheduler to use. Default: klms")

beta_scheduler_type_list = ["scaled_linear", "linear"]
beta_scheduler_type = st.selectbox("Beta Schedule Type:", beta_scheduler_type_list,
index=beta_scheduler_type_list.index(st.session_state['defaults'].txt2vid.beta_scheduler_type), help="Schedule Type to use. Default: linear")


#basic_tab, advanced_tab = st.tabs(["Basic", "Advanced"])
@ -1709,32 +1731,32 @@ def layout():
with st.expander("Advanced"):
with st.expander("Output Settings"):
st.session_state["separate_prompts"] = st.checkbox("Create Prompt Matrix.", value=st.session_state['defaults'].txt2vid.separate_prompts,
help="Separate multiple prompts using the `|` character, and get all combinations of them.")
st.session_state["normalize_prompt_weights"] = st.checkbox("Normalize Prompt Weights.",
value=st.session_state['defaults'].txt2vid.normalize_prompt_weights, help="Ensure the sum of all weights add up to 1.0")

st.session_state["save_individual_images"] = st.checkbox("Save individual images.",
value=st.session_state['defaults'].txt2vid.save_individual_images,
help="Save each image generated before any filter or enhancement is applied.")

st.session_state["save_video"] = st.checkbox("Save video",value=st.session_state['defaults'].txt2vid.save_video,
help="Save a video with all the images generated as frames at the end of the generation.")

save_video_on_stop = st.checkbox("Save video on Stop",value=st.session_state['defaults'].txt2vid.save_video_on_stop,
help="Save a video with all the images generated as frames when we hit the stop button during a generation.")

st.session_state["group_by_prompt"] = st.checkbox("Group results by prompt", value=st.session_state['defaults'].txt2vid.group_by_prompt,
help="Saves all the images with the same prompt into the same folder. When using a prompt \
matrix each prompt combination will have its own folder.")

st.session_state["write_info_files"] = st.checkbox("Write Info file", value=st.session_state['defaults'].txt2vid.write_info_files,
help="Save a file next to the image with informartion about the generation.")

st.session_state["do_loop"] = st.checkbox("Do Loop", value=st.session_state['defaults'].txt2vid.do_loop,
help="Loop the prompt making two prompts from a single one.")

st.session_state["use_lerp_for_text"] = st.checkbox("Use Lerp Instead of Slerp", value=st.session_state['defaults'].txt2vid.use_lerp_for_text,
help="Uses torch.lerp() instead of slerp. When interpolating between related prompts. \
e.g. 'a lion in a grassy meadow' -> 'a bear in a grassy meadow' tends to keep the meadow \
the whole way through when lerped, but slerping will often find a path where the meadow \
disappears in the middle")
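The slerp this help text refers to is the spherical interpolation used throughout these walk scripts (imported from sd_utils above). Roughly, it follows the arc between two embedding tensors instead of the straight line, and falls back to `torch.lerp` when the vectors are nearly parallel. A sketch of the standard formulation, not the project's exact helper:

```python
import torch

def slerp(t: float, v0: torch.Tensor, v1: torch.Tensor, dot_threshold: float = 0.9995):
    # Cosine of the angle between the two (flattened) embedding tensors.
    dot = torch.sum(v0 * v1) / (torch.norm(v0) * torch.norm(v1))
    if torch.abs(dot) > dot_threshold:
        # Nearly parallel: the spherical formula is unstable, lerp instead.
        return torch.lerp(v0, v1, t)
    omega = torch.acos(dot)  # angle between the vectors
    so = torch.sin(omega)
    return (torch.sin((1.0 - t) * omega) / so) * v0 + (torch.sin(t * omega) / so) * v1
```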
@ -1761,12 +1783,12 @@ def layout():
#if st.session_state["GFPGAN_available"]:
#with st.expander("GFPGAN"):
st.session_state["use_GFPGAN"] = st.checkbox("Use GFPGAN", value=st.session_state['defaults'].txt2vid.use_GFPGAN,
help="Uses the GFPGAN model to improve faces after the generation.\
This greatly improve the quality and consistency of faces but uses\
extra VRAM. Disable if you need the extra VRAM.")

st.session_state["GFPGAN_model"] = st.selectbox("GFPGAN model", st.session_state["GFPGAN_models"],
index=st.session_state["GFPGAN_models"].index(st.session_state['defaults'].general.GFPGAN_model))

#st.session_state["GFPGAN_strenght"] = st.slider("Effect Strenght", min_value=1, max_value=100, value=1, step=1, help='')

@ -1785,7 +1807,9 @@ def layout():
upscaling_method_list.append("LDSR")

st.session_state["upscaling_method"] = st.selectbox("Upscaling Method", upscaling_method_list,
index=upscaling_method_list.index(st.session_state['defaults'].general.upscaling_method))
index=upscaling_method_list.index(st.session_state['defaults'].general.upscaling_method)
if st.session_state['defaults'].general.upscaling_method in upscaling_method_list
else 0)
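Both upscaling hunks (here and in txt2img above) replace a bare `list.index()` with a guarded lookup, so a stale or misspelled config value falls back to the first entry instead of raising ValueError. The pattern in isolation; `safe_index` is an illustrative name, not a project helper:

```python
def safe_index(options: list, value, fallback: int = 0) -> int:
    # list.index() raises ValueError for unknown values; guard it so the
    # selectbox still renders with a sane default.
    return options.index(value) if value in options else fallback

upscaling_method_list = ["RealESRGAN", "LDSR"]
assert safe_index(upscaling_method_list, "LDSR") == 1
assert safe_index(upscaling_method_list, "GoBIG") == 0  # unknown -> default
```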

if st.session_state["RealESRGAN_available"]:
with st.expander("RealESRGAN"):
@ -1795,7 +1819,7 @@ def layout():
st.session_state["use_RealESRGAN"] = False

st.session_state["RealESRGAN_model"] = st.selectbox("RealESRGAN model", st.session_state["RealESRGAN_models"],
index=st.session_state["RealESRGAN_models"].index(st.session_state['defaults'].general.RealESRGAN_model))
else:
st.session_state["use_RealESRGAN"] = False
st.session_state["RealESRGAN_model"] = "RealESRGAN_x4plus"
@ -1810,20 +1834,20 @@ def layout():
st.session_state["use_LDSR"] = False

st.session_state["LDSR_model"] = st.selectbox("LDSR model", st.session_state["LDSR_models"],
index=st.session_state["LDSR_models"].index(st.session_state['defaults'].general.LDSR_model))

st.session_state["ldsr_sampling_steps"] = st.number_input("Sampling Steps", value=st.session_state['defaults'].txt2vid.LDSR_config.sampling_steps,
help="")

st.session_state["preDownScale"] = st.number_input("PreDownScale", value=st.session_state['defaults'].txt2vid.LDSR_config.preDownScale,
help="")

st.session_state["postDownScale"] = st.number_input("postDownScale", value=st.session_state['defaults'].txt2vid.LDSR_config.postDownScale,
help="")

downsample_method_list = ['Nearest', 'Lanczos']
st.session_state["downsample_method"] = st.selectbox("Downsample Method", downsample_method_list,
index=downsample_method_list.index(st.session_state['defaults'].txt2vid.LDSR_config.downsample_method))

else:
st.session_state["use_LDSR"] = False
@ -1831,20 +1855,20 @@ def layout():

with st.expander("Variant"):
st.session_state["variant_amount"] = st.number_input("Variant Amount:", value=st.session_state['defaults'].txt2vid.variant_amount.value,
min_value=st.session_state['defaults'].txt2vid.variant_amount.min_value,
max_value=st.session_state['defaults'].txt2vid.variant_amount.max_value,
step=st.session_state['defaults'].txt2vid.variant_amount.step)

st.session_state["variant_seed"] = st.text_input("Variant Seed:", value=st.session_state['defaults'].txt2vid.seed,
help="The seed to use when generating a variant, if left blank a random seed will be generated.")

#st.session_state["beta_start"] = st.slider("Beta Start:", value=st.session_state['defaults'].txt2vid.beta_start.value,
#min_value=st.session_state['defaults'].txt2vid.beta_start.min_value,
#max_value=st.session_state['defaults'].txt2vid.beta_start.max_value,
#step=st.session_state['defaults'].txt2vid.beta_start.step, format=st.session_state['defaults'].txt2vid.beta_start.format)
#st.session_state["beta_end"] = st.slider("Beta End:", value=st.session_state['defaults'].txt2vid.beta_end.value,
#min_value=st.session_state['defaults'].txt2vid.beta_end.min_value, max_value=st.session_state['defaults'].txt2vid.beta_end.max_value,
#step=st.session_state['defaults'].txt2vid.beta_end.step, format=st.session_state['defaults'].txt2vid.beta_end.format)

if generate_button:
#print("Loading models")
@ -1873,7 +1897,7 @@ def layout():
#try:
# run video generation
video, seed, info, stats = txt2vid(prompts=prompt, gpu=st.session_state["defaults"].general.gpu,
num_steps=st.session_state.sampling_steps, max_duration_in_seconds=st.session_state.max_duration_in_seconds,
num_inference_steps=st.session_state.num_inference_steps,
cfg_scale=cfg_scale, save_video_on_stop=save_video_on_stop,
outdir=st.session_state["defaults"].general.outdir,

@ -19,9 +19,8 @@

# We import hydralit like this to replace the previous stuff
# we had with native streamlit as it lets ur replace things 1:1
#import hydralit as st
import collections.abc
from sd_utils import *
from sd_utils import st, hc, load_configs, load_css, set_logger_verbosity,\
logger, quiesce_logger, set_page_title, threading, random

# streamlit imports
import streamlit_nested_layout
@ -31,12 +30,8 @@ import streamlit_nested_layout
from streamlit_server_state import server_state, server_state_lock

#other imports

import warnings
import os, toml
import k_diffusion as K
from omegaconf import OmegaConf
import argparse
from sd_utils.bridge import run_bridge

# import custom components
from custom_components import draggable_number_input
@ -74,38 +69,6 @@ opt = parser.parse_args()
with server_state_lock["bridge"]:
server_state["bridge"] = opt.bridge

try:
# this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
from transformers import logging

logging.set_verbosity_error()
except:
pass

# remove some annoying deprecation warnings that show every now and then.
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)

# this should force GFPGAN and RealESRGAN onto the selected gpu as well
#os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
#os.environ["CUDA_VISIBLE_DEVICES"] = str(st.session_state["defaults"].general.gpu)


# functions to load css locally OR remotely starts here. Options exist for future flexibility. Called as st.markdown with unsafe_allow_html as css injection
# TODO, maybe look into async loading the file especially for remote fetching
def local_css(file_name):
with open(file_name) as f:
st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)

def remote_css(url):
st.markdown(f'<link href="{url}" rel="stylesheet">', unsafe_allow_html=True)

def load_css(isLocal, nameOrURL):
if(isLocal):
local_css(nameOrURL)
else:
remote_css(nameOrURL)

@logger.catch(reraise=True)
def layout():
"""Layout functions to define all the streamlit layout here."""
@ -115,29 +78,25 @@ def layout():
#app = st.HydraApp(title='Stable Diffusion WebUI', favicon="", sidebar_state="expanded", layout="wide",
#hide_streamlit_markers=False, allow_url_nav=True , clear_cross_app_sessions=False)

with st.empty():
# load css as an external file, function has an option to local or remote url. Potential use when running from cloud infra that might not have access to local path.
load_css(True, 'frontend/css/streamlit.main.css')

# load css as an external file, function has an option to local or remote url. Potential use when running from cloud infra that might not have access to local path.
load_css(True, 'frontend/css/streamlit.main.css')

#
# specify the primary menu definition
menu_data = [
{'id': 'Stable Diffusion', 'label': 'Stable Diffusion', 'icon': 'bi bi-grid-1x2-fill'},
{'id': 'Textual Inversion', 'label': 'Textual Inversion', 'icon': 'bi bi-lightbulb-fill'},
{'id': 'Train','label':"Train", 'icon': "bi bi-lightbulb-fill", 'submenu':[
{'id': 'Textual Inversion', 'label': 'Textual Inversion', 'icon': 'bi bi-lightbulb-fill'},
{'id': 'Fine Tunning', 'label': 'Fine Tunning', 'icon': 'bi bi-lightbulb-fill'},
]},
{'id': 'Model Manager', 'label': 'Model Manager', 'icon': 'bi bi-cloud-arrow-down-fill'},
{'id': 'Tools','label':"Tools", 'icon': "bi bi-tools", 'submenu':[
{'id': 'API Server', 'label': 'API Server', 'icon': 'bi bi-server'},
#{'id': 'Barfi/BaklavaJS', 'label': 'Barfi/BaklavaJS', 'icon': 'bi bi-diagram-3-fill'},
{'id': 'Barfi/BaklavaJS', 'label': 'Barfi/BaklavaJS', 'icon': 'bi bi-diagram-3-fill'},
#{'id': 'API Server', 'label': 'API Server', 'icon': 'bi bi-server'},
]},
{'id': 'Settings', 'label': 'Settings', 'icon': 'bi bi-gear-fill'},
#{'icon': "fa-solid fa-radar",'label':"Dropdown1", 'submenu':[
# {'id':' subid11','icon': "fa fa-paperclip", 'label':"Sub-item 1"},{'id':'subid12','icon': "💀", 'label':"Sub-item 2"},{'id':'subid13','icon': "fa fa-database", 'label':"Sub-item 3"}]},
#{'icon': "far fa-chart-bar", 'label':"Chart"},#no tooltip message
#{'id':' Crazy return value 💀','icon': "💀", 'label':"Calendar"},
#{'icon': "fas fa-tachometer-alt", 'label':"Dashboard",'ttip':"I'm the Dashboard tooltip!"}, #can add a tooltip message
#{'icon': "far fa-copy", 'label':"Right End"},
#{'icon': "fa-solid fa-radar",'label':"Dropdown2", 'submenu':[{'label':"Sub-item 1", 'icon': "fa fa-meh"},{'label':"Sub-item 2"},{'icon':'🙉','label':"Sub-item 3",}]},
]

over_theme = {'txc_inactive': '#FFFFFF', "menu_background":'#000000'}
@ -152,29 +111,6 @@ def layout():
sticky_mode='pinned',
)

# check if the models exist on their respective folders
with server_state_lock["GFPGAN_available"]:
if os.path.exists(os.path.join(st.session_state["defaults"].general.GFPGAN_dir, f"{st.session_state['defaults'].general.GFPGAN_model}.pth")):
server_state["GFPGAN_available"] = True
else:
server_state["GFPGAN_available"] = False

with server_state_lock["RealESRGAN_available"]:
if os.path.exists(os.path.join(st.session_state["defaults"].general.RealESRGAN_dir, f"{st.session_state['defaults'].general.RealESRGAN_model}.pth")):
server_state["RealESRGAN_available"] = True
else:
server_state["RealESRGAN_available"] = False

#with st.sidebar:
#page = on_hover_tabs(tabName=['Stable Diffusion', "Textual Inversion","Model Manager","Settings"],
#iconName=['dashboard','model_training' ,'cloud_download', 'settings'], default_choice=0)

# need to see how to get the icons to show for the hydralit option_bar
#page = hc.option_bar([{'icon':'grid-outline','label':'Stable Diffusion'}, {'label':"Textual Inversion"},
#{'label':"Model Manager"},{'label':"Settings"}],
#horizontal_orientation=False,
#override_theme={'txc_inactive': 'white','menu_background':'#111', 'stVerticalBlock': '#111','txc_active':'yellow','option_active':'blue'})

#
#if menu_id == "Home":
#st.info("Under Construction. :construction_worker:")
@ -187,9 +123,10 @@ def layout():
except NameError:
st.experimental_rerun()

txt2img_tab, img2img_tab, txt2vid_tab, img2txt_tab, concept_library_tab = st.tabs(["Text-to-Image", "Image-to-Image",
"Text-to-Video", "Image-To-Text",
"Concept Library"])
txt2img_tab, img2img_tab, txt2vid_tab, img2txt_tab, post_processing_tab, concept_library_tab = st.tabs(["Text-to-Image", "Image-to-Image",
#"Inpainting",
"Text-to-Video", "Image-To-Text",
"Post-Processing","Concept Library"])
#with home_tab:
#from home import layout
#layout()
@ -214,6 +151,10 @@ def layout():
from img2txt import layout
layout()

with post_processing_tab:
from post_processing import layout
layout()

with concept_library_tab:
from sd_concept_library import layout
layout()
@ -229,15 +170,20 @@ def layout():
from textual_inversion import layout
layout()

elif menu_id == 'Fine Tunning':
#from textual_inversion import layout
#layout()
st.info("Under Construction. :construction_worker:")

elif menu_id == 'API Server':
set_page_title("API Server - Stable Diffusion Playground")
from APIServer import layout
layout()

#elif menu_id == 'Barfi/BaklavaJS':
#set_page_title("Barfi/BaklavaJS - Stable Diffusion Playground")
#from barfi_baklavajs import layout
#layout()
elif menu_id == 'Barfi/BaklavaJS':
set_page_title("Barfi/BaklavaJS - Stable Diffusion Playground")
from barfi_baklavajs import layout
layout()

elif menu_id == 'Settings':
set_page_title("Settings - Stable Diffusion Playground")
33
sidebars.js
Normal file
@ -0,0 +1,33 @@
/**
 * Creating a sidebar enables you to:
 - create an ordered group of docs
 - render a sidebar for each doc of that group
 - provide next/previous navigation

 The sidebars can be generated from the filesystem, or explicitly defined here.

 Create as many sidebars as you want.
 */

// @ts-check

/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
const sidebars = {
  // By default, Docusaurus generates a sidebar from the docs folder structure
  tutorialSidebar: [{type: 'autogenerated', dirName: '.'}],

  // But you can create a sidebar manually
  /*
  tutorialSidebar: [
    'intro',
    'hello',
    {
      type: 'category',
      label: 'Tutorial',
      items: ['tutorial-basics/create-a-document'],
    },
  ],
   */
};

module.exports = sidebars;
66
src/components/HomepageFeatures/index.js
Normal file
@ -0,0 +1,66 @@
import React from 'react';
import clsx from 'clsx';
import styles from './styles.module.css';

const FeatureList = [

  /*
  {
    title: 'Easy to Use',
    Svg: require('@site/images/undraw_docusaurus_mountain.svg').default,
    description: (
      <>
        Docusaurus was designed from the ground up to be easily installed and
        used to get your website up and running quickly.
      </>
    ),
  },
  {
    title: 'Focus on What Matters',
    Svg: require('@site/images/undraw_docusaurus_tree.svg').default,
    description: (
      <>
        Docusaurus lets you focus on your docs, and we'll do the chores. Go
        ahead and move your docs into the <code>docs</code> directory.
      </>
    ),
  },
  {
    title: 'Powered by React',
    Svg: require('@site/images/undraw_docusaurus_react.svg').default,
    description: (
      <>
        Extend or customize your website layout by reusing React. Docusaurus can
        be extended while reusing the same header and footer.
      </>
    ),
  },*/
];

function Feature({Svg, title, description}) {
  return (
    <div className={clsx('col col--4')}>
      <div className="text--center">
        <Svg className={styles.featureSvg} role="img" />
      </div>
      <div className="text--center padding-horiz--md">
        <h3>{title}</h3>
        <p>{description}</p>
      </div>
    </div>
  );
}

export default function HomepageFeatures() {
  return (
    <section className={styles.features}>
      <div className="container">
        <div className="row">
          {FeatureList.map((props, idx) => (
            <Feature key={idx} {...props} />
          ))}
        </div>
      </div>
    </section>
  );
}
11
src/components/HomepageFeatures/styles.module.css
Normal file
@ -0,0 +1,11 @@
.features {
  display: flex;
  align-items: center;
  padding: 2rem 0;
  width: 100%;
}

.featureSvg {
  height: 200px;
  width: 200px;
}
41
src/pages/index.js
Normal file
@ -0,0 +1,41 @@
import React from 'react';
import clsx from 'clsx';
import Link from '@docusaurus/Link';
import useDocusaurusContext from '@docusaurus/useDocusaurusContext';
import Layout from '@theme/Layout';
import HomepageFeatures from '@site/src/components/HomepageFeatures';

import styles from './index.module.css';

function HomepageHeader() {
  const {siteConfig} = useDocusaurusContext();
  return (
    <header className={clsx('hero hero--primary', styles.heroBanner)}>
      <div className="container">
        <h1 className="hero__title">{siteConfig.title}</h1>
        <p className="hero__subtitle">{siteConfig.tagline}</p>
        <div className={styles.buttons}>
          <Link
            className="button button--secondary button--lg"
            to="docs/Installation/one-click-installer">
            Get Started
          </Link>
        </div>
      </div>
    </header>
  );
}

export default function Home() {
  const {siteConfig} = useDocusaurusContext();
  return (
    <Layout
      title={`${siteConfig.title}`}
      description="Web-based UI for Stable Diffusion">
      <HomepageHeader />
      <main>
        <HomepageFeatures />
      </main>
    </Layout>
  );
}
23
src/pages/index.module.css
Normal file
@ -0,0 +1,23 @@
/**
 * CSS files with the .module.css suffix will be treated as CSS modules
 * and scoped locally.
 */

.heroBanner {
  padding: 4rem 0;
  text-align: center;
  position: relative;
  overflow: hidden;
}

@media screen and (max-width: 996px) {
  .heroBanner {
    padding: 2rem;
  }
}

.buttons {
  display: flex;
  align-items: center;
  justify-content: center;
}
7
src/pages/markdown-page.md
Normal file
@ -0,0 +1,7 @@
---
title: Markdown page example
---

# Markdown page example

You don't need React to write simple standalone pages.
10
webui.cmd
@ -31,7 +31,11 @@ IF EXIST custom-conda-path.txt (
FOR /F %%i IN (custom-conda-path.txt) DO set v_custom_path=%%i
)

set v_paths=%ProgramData%\miniconda3
set INSTALL_ENV_DIR=%cd%\installer_files\env
set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%

set v_paths=%INSTALL_ENV_DIR%
set v_paths=%v_paths%;%ProgramData%\miniconda3
set v_paths=%v_paths%;%USERPROFILE%\miniconda3
set v_paths=%v_paths%;%ProgramData%\anaconda3
set v_paths=%v_paths%;%USERPROFILE%\anaconda3
@ -99,10 +103,10 @@ call "%v_conda_path%\Scripts\activate.bat" "%v_conda_env_name%"
:PROMPT
set SETUPTOOLS_USE_DISTUTILS=stdlib
IF EXIST "models\ldm\stable-diffusion-v1\Stable Diffusion v1.5.ckpt" (
    python -m streamlit run scripts\webui_streamlit.py --theme.base dark --server.address localhost
    python -m streamlit run scripts\webui_streamlit.py --theme.base dark
) ELSE (
    echo Your model file does not exist! Once the WebUI launches please visit the Model Manager page and download the models by using the Download button for each model.
    python -m streamlit run scripts\webui_streamlit.py --theme.base dark --server.address localhost
    python -m streamlit run scripts\webui_streamlit.py --theme.base dark
)

::cmd /k

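The `v_paths` change above puts the installer-provided environment first in the conda search order, ahead of the system-wide and per-user Miniconda/Anaconda locations. A hedged Python illustration of the same lookup order (illustrative only, not part of the scripts):

```python
# illustrative only: the search order webui.cmd now uses for a conda install
import os

install_env_dir = os.path.join(os.getcwd(), "installer_files", "env")
candidates = [
    install_env_dir,                                    # installer env, tried first
    os.path.expandvars(r"%ProgramData%\miniconda3"),    # %VAR% expansion is Windows-only
    os.path.expandvars(r"%USERPROFILE%\miniconda3"),
    os.path.expandvars(r"%ProgramData%\anaconda3"),
    os.path.expandvars(r"%USERPROFILE%\anaconda3"),
]
conda_path = next((p for p in candidates if os.path.isdir(p)), None)
```
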
12
webui.sh
@ -24,6 +24,9 @@ ENV_MODIFIED=$(date -r $ENV_FILE "+%s")
ENV_MODIFED_FILE=".env_updated"
ENV_UPDATED=0

INSTALL_ENV_DIR="$(pwd)/../installer_files/env" # since linux-sd.sh clones the repo into a subfolder
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi

# Models used for upscaling
GFPGAN_MODEL="https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth"
LATENT_DIFFUSION_REPO="https://github.com/devilismyfriend/latent-diffusion.git"
@ -50,6 +53,11 @@ conda_env_setup () {
    CUSTOM_CONDA_PATH=$(cat custom-conda-path.txt)
fi

# If a custom conda isn't specified, and the installer downloaded conda for the user, then use that
if [ -f "$INSTALL_ENV_DIR/etc/profile.d/conda.sh" ] && [ "$CUSTOM_CONDA_PATH" == "" ]; then
    . "$INSTALL_ENV_DIR/etc/profile.d/conda.sh"
fi

# If custom path is set above, try to setup conda environment
if [ -f "${CUSTOM_CONDA_PATH}/etc/profile.d/conda.sh" ]; then
    . "${CUSTOM_CONDA_PATH}/etc/profile.d/conda.sh"
@ -148,12 +156,12 @@ launch_webui () {
done
printf "\n\n########## LAUNCH USING STREAMLIT OR GRADIO? ##########\n\n"
printf "Do you wish to run the WebUI using the Gradio or StreamLit Interface?\n\n"
printf "Streamlit: \nHas A More Modern UI \nMore Features Planned \nWill Be The Main UI Going Forward \nCurrently In Active Development \nMissing Some Gradio Features\n\n"
printf "Streamlit: \nHas A More Modern UI \nMore Features Planned \nWill Be The Main UI Going Forward \nCurrently In Active Development \n\n"
printf "Gradio: \nCurrently Feature Complete \nUses An Older Interface Style \nWill Not Receive Major Updates\n\n"
printf "Which Version of the WebUI Interface do you wish to use?\n"
select yn in "Streamlit" "Gradio"; do
    case $yn in
        Streamlit ) printf "\nStarting Stable Diffusion WebUI: Streamlit Interface. Please Wait...\n"; python -m streamlit run scripts/webui_streamlit.py --theme.base dark --server.address localhost; break;;
        Streamlit ) printf "\nStarting Stable Diffusion WebUI: Streamlit Interface. Please Wait...\n"; python -m streamlit run scripts/webui_streamlit.py; break;;
        Gradio ) printf "\nStarting Stable Diffusion WebUI: Gradio Interface. Please Wait...\n"; python scripts/relauncher.py "$@"; break;;
    esac
done

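Note that both launch scripts drop `--server.address localhost`, so Streamlit falls back to its default binding and the UI becomes reachable from other machines on the network. A minimal sketch of restoring localhost-only binding with the same command the scripts invoke (illustrative only):

```python
# minimal sketch: relaunching with the localhost-only binding this diff removes
import subprocess

subprocess.run([
    "python", "-m", "streamlit", "run", "scripts/webui_streamlit.py",
    "--theme.base", "dark",
    "--server.address", "localhost",
])
```

The same setting can also be made permanent in `.streamlit/config.toml` via `address = "localhost"` under the `[server]` section, rather than passing the flag on every launch.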