Merge branch 'sd-webui:dev' into dev

This commit is contained in:
Alejandro Gil 2022-10-02 11:00:24 -07:00 committed by GitHub
commit fdf600b49a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 2409 additions and 30 deletions

3
.gitignore vendored
View File

@ -64,4 +64,5 @@ condaenv.*.requirements.txt
/gfpgan/*
/models/*
z_version_env.tmp
/user_data/*
scripts/bridgeData.py
/user_data/*

View File

@ -8,7 +8,6 @@
- **[Windows](https://sd-webui.github.io/stable-diffusion-webui/docs/1.windows-installation.html)**
- **[Linux](https://sd-webui.github.io/stable-diffusion-webui/docs/2.linux-installation.html)**
### Want to ask a question or request a feature?
Come to our [Discord Server](https://discord.gg/gyXNe4NySY) or use [Discussions](https://github.com/sd-webui/stable-diffusion-webui/discussions).

View File

@ -0,0 +1,40 @@
// blend it together and finish it with details
prompt: cute happy orange cat sitting at beach, beach in background, trending on artstation:1 cute happy cat:1
sampler_name:k_euler_a
ddim_steps: 35
denoising_strength: 0.55
variation: 3
initial_seed: 1
# put foreground onto background
size: 512, 512
color: 0,0,0
## create foreground
size:512,512
color:0,0,0,0
resize: 300, 300
pos: 256, 350
// select mask by probing some pixels from the image
mask_by_color_at: 15, 15, 15, 256, 85, 465, 100, 480
mask_by_color_threshold:80
mask_by_color_space: HLS
// some pixels inside the cat may be selected, remove them with mask_open
mask_open: 15
// there is still some background pixels left at the edge between cat and background
// grow the mask to get them as well
mask_grow: 15
// we want to remove whatever is masked:
mask_invert: True
####
prompt: cute happy orange cat, white background
ddim_steps: 25
variation: 1
## create background
prompt:beach landscape, beach with ocean in background, photographic, beautiful:1 red:-0.4

View File

@ -0,0 +1,50 @@
initial_seed: 2
// select background and img2img over it
mask_by_color_at: 64, 64
mask_invert: True
prompt: corgi
ddim_steps: 50
seed: 242886303
mask_mode: 0
denoising_strength: 0.8
//cfg_scale: 15
mask_restore: True
image_editor_mode:Mask
# estimate depth and transform the corgi in 3d
transform3d: True
transform3d_depth_near: 0.5
transform3d_depth_scale: 10
transform3d_from_hfov: 45
transform3d_to_hfov: 45
transform3d_from_pose: 0,0,0, 0,0,0
transform3d_to_pose: 0.5,0,0, 0,-5,0
transform3d_min_mask: 0
transform3d_max_mask: 255
transform3d_inpaint_radius: 1
transform3d_inpaint_method: 0
## put foreground onto background
size: 512, 512
### create foreground
size: 512, 512
mask_depth: True
mask_depth_model: 1
mask_depth_min: -0.05
mask_depth_max: 0.5
mask_depth_invert:False
####
prompt: corgi
ddim_steps: 25
seed: 242886303
### background
size: 512,512
color: #9F978D

View File

@ -0,0 +1,25 @@
// blend it together and finish it with some details
prompt: cute corgi at beach, trending on artstation
ddim_steps: 50
denoising_strength: 0.5
initial_seed: 2
# put foreground onto background
size: 512, 512
## create foreground
size: 512, 512
// estimate depth from image and select mask by depth
// https://huggingface.co/spaces/atsantiago/Monocular_Depth_Filter
mask_depth: True
mask_depth_min: -0.05
mask_depth_max: 0.4
mask_depth_invert:False
###
prompt: corgi
ddim_steps: 25
## create background
prompt:beach landscape, beach with ocean in background, photographic, beautiful:1 red:-0.4

View File

@ -0,0 +1,34 @@
// give it some polish and details
size: 512, 512
prompt: cute corgi at beach, intricate details, photorealistic, trending on artstation
variation: 0
seed: 1360051694
initial_seed: 5
# blend it together
prompt: beautiful corgi:1.5 cute corgi at beach, trending on artstation:1 photorealistic:1.5
ddim_steps: 50
denoising_strength: 0.5
variation: 0
## put foreground in front of background
size: 512, 512
### select foreground
size: 512, 512
// estimate depth from image and select mask by depth
// https://huggingface.co/spaces/atsantiago/Monocular_Depth_Filter
mask_depth: True
mask_depth_min: -0.05
mask_depth_max: 0.4
mask_depth_invert:False
#### create foreground
prompt: corgi
ddim_steps: 25
seed: 242886303
### create background
prompt:beach landscape, beach with ocean in background, photographic, beautiful:1 red:-0.4
variation: 3

View File

@ -0,0 +1,37 @@
size: 512,512
mask_blur: 6
prompt: fantasy landscape with castle and forest and waterfalls, trending on artstation
denoising_strength: 0.6
seed: 1
image_editor_mode: Mask
mask_mode: 0
mask_restore: True
# mask the left which contains artifacts
color: 255,255,255,0
blend:multiply
size: 100,512
pos: 50,256
# mask the top-left which contains lots of artifacts
color: 255,255,255,0
blend:multiply
size: 280,128
pos: 128,64
# go forward and turn head left to look at the left waterfalls
transform3d: True
transform3d_depth_scale: 10000
transform3d_from_hfov: 60
transform3d_to_hfov: 60
transform3d_from_pose: 0,0,0, 0,0,0
transform3d_to_pose: 4000,0,2000, 0,-50,0
transform3d_min_mask: 0
transform3d_max_mask: 255
transform3d_inpaint_radius: 5
transform3d_inpaint_method: 1
##
prompt: fantasy landscape with castle and forest and waterfalls, trending on artstation
seed: 1

View File

@ -19,7 +19,6 @@ You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
-->
# Initial Setup
> This is a windows guide. [To install on Linux, see this page.](2.linux-installation.md)
@ -126,5 +125,4 @@ into the `/stable-diffusion-webui/src/gfpgan/experiments/pretrained_models` dire
# Credits
> Big thanks to Arkitecc#0339 from the Stable Diffusion discord for the original guide (support them [here](https://ko-fi.com/arkitecc)).
> Modified by [Hafiidz](https://github.com/Hafiidz) with helps from sd-webui discord and team.

View File

@ -28,6 +28,7 @@ dependencies:
- pytorch=1.11.0
- scikit-image=0.19.2
- torchvision=0.12.0
- loguru
- pip:
- -e .
- -e git+https://github.com/CompVis/taming-transformers#egg=taming-transformers
@ -39,7 +40,7 @@ dependencies:
- albumentations==0.4.3
- basicsr>=1.3.4.0
- diffusers==0.3.0
- einops==0.3.0
- einops==0.3.1
- facexlib>=0.2.3
- ftfy==6.1.1
- fairscale==0.4.4
@ -71,9 +72,10 @@ dependencies:
- streamlit-tensorboard==0.0.2
- test-tube>=0.7.5
- tensorboard==2.10.1
- timm==0.4.12
- timm==0.6.7
- torch-fidelity==0.3.0
- torchmetrics==0.6.0
- transformers==4.19.2
- tensorflow==2.10.0
- tqdm==4.64.0

View File

@ -19,15 +19,16 @@ from frontend.job_manager import JobManager
import frontend.ui_functions as uifn
import uuid
import torch
import os
def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x, imgproc=lambda x: x, txt2img_defaults={},
RealESRGAN=True, GFPGAN=True, LDSR=True,
def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x, imgproc=lambda x: x, scn2img=lambda x: x,
txt2img_defaults={}, RealESRGAN=True, GFPGAN=True, LDSR=True,
txt2img_toggles={}, txt2img_toggle_defaults='k_euler', show_embeddings=False, img2img_defaults={},
img2img_toggles={}, img2img_toggle_defaults={}, sample_img2img=None, img2img_mask_modes=None,
img2img_resize_modes=None, imgproc_defaults={}, imgproc_mode_toggles={}, user_defaults={},
run_GFPGAN=lambda x: x, run_RealESRGAN=lambda x: x,
img2img_resize_modes=None, imgproc_defaults={}, imgproc_mode_toggles={},
scn2img_defaults={}, scn2img_toggles={}, scn2img_toggle_defaults={}, scn2img_define_args=lambda: ({},{},{}),
user_defaults={}, run_GFPGAN=lambda x: x, run_RealESRGAN=lambda x: x,
job_manager: JobManager = None) -> gr.Blocks:
with gr.Blocks(css=css(opt), analytics_enabled=False, title="Stable Diffusion WebUI") as demo:
with gr.Tabs(elem_id='tabss') as tabs:
@ -127,7 +128,7 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x, imgproc=lambda
txt2img_realesrgan_model_name = gr.Dropdown(label='RealESRGAN model',
choices=['RealESRGAN_x4plus',
'RealESRGAN_x4plus_anime_6B'],
value='RealESRGAN_x4plus',
value=txt2img_defaults['realesrgan_model_name'],
visible=False) # RealESRGAN is not None # invisible until removed) # TODO: Feels like I shouldnt slot it in here.
txt2img_ddim_eta = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="DDIM ETA",
@ -243,7 +244,7 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x, imgproc=lambda
img2img_mask_blur_strength = gr.Slider(minimum=1, maximum=100, step=1,
label="How much blurry should the mask be? (to avoid hard edges)",
value=3, visible=True)
value=img2img_defaults['mask_blur_strength'], visible=True)
img2img_resize = gr.Radio(label="Resize mode",
choices=["Just resize", "Crop and resize",
@ -330,7 +331,7 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x, imgproc=lambda
img2img_realesrgan_model_name = gr.Dropdown(label='RealESRGAN model',
choices=['RealESRGAN_x4plus',
'RealESRGAN_x4plus_anime_6B'],
value='RealESRGAN_x4plus',
value=img2img_defaults['realesrgan_model_name'],
visible=RealESRGAN is not None) # TODO: Feels like I shouldnt slot it in here.
img2img_embeddings = gr.File(label="Embeddings file for textual inversion",
@ -640,6 +641,176 @@ def draw_gradio_ui(opt, img2img=lambda x: x, txt2img=lambda x: x, imgproc=lambda
imgproc_upscale_toggles.change(fn=uifn.toggle_options_gobig, inputs=[imgproc_upscale_toggles],
outputs=[gobig_group])
with gr.TabItem("Scene-to-Image", id='scn2img_tab'):
example_path = os.path.join("data","scn2img_examples")
files = os.listdir(example_path)
examples = {}
for fn in files:
filepath = os.path.join(example_path, str(fn))
with open(filepath, "r") as file:
examples[fn] = file.read()
with gr.Row(elem_id="tools_row"):
scn2img_btn = gr.Button("Generate", elem_id="generate", variant="primary")
with gr.Row().style(equal_height=False):
with gr.Column():
scn2img_seed = gr.Textbox(
label="Seed (blank to randomize, specify to use cache)", lines=1, max_lines=1,
value=scn2img_defaults["seed"]
)
scn2img_prompt = gr.Textbox(
label="Prompt Scene",
elem_id='scn2_img_input',
placeholder=examples[list(examples.keys())[0]],
lines=50,
max_lines=50,
value=scn2img_defaults['prompt'],
show_label=False
)
with gr.Column():
with gr.Tabs():
with gr.TabItem("Results", id="scn2img_results_tab"):
# gr.Markdown('#### Scn2Img Results')
output_scn2img_gallery = gr.Gallery(
label="Images",
elem_id="scn2img_gallery_output"
).style(grid=[3, 3, 3], height=80)
scn2img_job_ui = job_manager.draw_gradio_ui() if job_manager else None
with gr.Tabs():
with gr.TabItem("Generated image actions", id="scn2img_actions_tab"):
gr.Markdown("Select an image, then press one of the buttons below")
with gr.Row():
output_scn2img_copy_to_clipboard_btn = gr.Button("Copy to clipboard")
output_scn2img_copy_to_img2img_input_btn = gr.Button("Push to img2img input")
output_scn2img_copy_to_img2img_mask_btn = gr.Button("Push to img2img input mask")
gr.Markdown("Warning: This will clear your current img2img image and mask settings!")
with gr.TabItem("Output info", id="scn2img_output_info_tab"):
output_scn2img_params = gr.Highlightedtext(label="Generation parameters", interactive=False,
elem_id='scn2img_highlight')
with gr.Row():
output_scn2img_copy_params = gr.Button("Copy full parameters").click(
inputs=[output_scn2img_params],
outputs=[],
_js=call_JS(
'copyFullOutput',
fromId='scn2img_highlight'
),
fn=None, show_progress=False
)
output_scn2img_seed = gr.Number(label='Seed', interactive=False, visible=False)
output_scn2img_copy_seed = gr.Button("Copy only initial seed").click(
inputs=output_scn2img_seed, outputs=[],
_js=call_JS("gradioInputToClipboard"), fn=None, show_progress=False)
output_scn2img_stats = gr.HTML(label='Stats')
with gr.TabItem("SceneCode", id="scn2img_scncode_tab"):
output_scn2img_scncode = gr.HTML(label="SceneCode")
scn2img_toggles = gr.CheckboxGroup(label='', choices=scn2img_toggles,
value=scn2img_toggle_defaults, type="index")
scn2img_embeddings = gr.File(label="Embeddings file for textual inversion",
visible=show_embeddings)
with gr.TabItem("Docs", id="scn2img_docs_tab"):
parse_arg, function_args, function_args_ext = scn2img_define_args()
with gr.Tabs():
with gr.TabItem("syntax", id=f"scn2img_docs_syntax_tab"):
lines = [
"Scene-to-Image defines layers of images in markdown-like syntax.",
"",
"Markdown headings, e.g. '# layer0', define layers.",
"Layers are hierarchical, i.e. each layer can contain more layers.",
"Child layers are blended together by their image masks, like layers in image editors.",
"",
"The content of sections define the arguments for image generation.",
"Arguments are defined by lines of the form 'arg:value' or 'arg=value'.",
"",
"To invoke txt2img or img2img, they layer must contain the 'prompt' argument.",
"For img2img the layer must have child layers, the result of blending them will be the input image for img2img.",
"When no prompt is specified the layer can still be used for image composition and mask selection.",
]
gr.Markdown("\n".join(lines))
for func, ext in function_args_ext.items():
with gr.TabItem(func, id=f"scn2img_docs_{func}_tab"):
lines = []
for e in ext:
lines.append(f"#### Arguments for {e}")
if e not in function_args: continue
for argname,argtype in function_args[e].items():
lines.append(f" - {argname}: {argtype}")
gr.Markdown("\n".join(lines))
with gr.TabItem("Examples", id="scn2img_examples_tab"):
scn2img_examples = {}
with gr.Tabs():
for k, (example, content) in enumerate(examples.items()):
with gr.TabItem(example, id=f"scn2img_example_{k}_tab"):
scn2img_examples[example] = gr.Textbox(
label="Prompt Scene",
elem_id=f"scn2img_example_{k}",
value=content,
lines=50,
max_lines=50,
show_label=False,
interactive=True
)
output_scn2img_copy_to_img2img_input_btn.click(
uifn.copy_img_to_edit,
[output_scn2img_gallery],
[img2img_image_editor, tabs, img2img_image_editor_mode],
_js=call_JS("moveImageFromGallery",
fromId="scn2img_gallery_output",
toId="img2img_editor")
)
output_scn2img_copy_to_img2img_mask_btn.click(
uifn.copy_img_to_mask,
[output_scn2img_gallery],
[img2img_image_mask, tabs, img2img_image_editor_mode],
_js=call_JS("moveImageFromGallery",
fromId="scn2img_gallery_output",
toId="img2img_editor")
)
output_scn2img_copy_to_clipboard_btn.click(
fn=None,
inputs=output_scn2img_gallery,
outputs=[],
_js=call_JS("copyImageFromGalleryToClipboard",
fromId="scn2img_gallery_output")
)
scn2img_func = scn2img
scn2img_inputs = [
scn2img_prompt,
scn2img_toggles,
scn2img_seed,
scn2img_embeddings
]
scn2img_outputs = [
output_scn2img_gallery,
output_scn2img_seed,
output_scn2img_params,
output_scn2img_stats,
output_scn2img_scncode
]
# If a JobManager was passed in then wrap the Generate functions
if scn2img_job_ui:
scn2img_func, scn2img_inputs, scn2img_outputs = scn2img_job_ui.wrap_func(
func=scn2img_func,
inputs=scn2img_inputs,
outputs=scn2img_outputs,
)
scn2img_btn.click(
scn2img_func,
scn2img_inputs,
scn2img_outputs
)
"""
if GFPGAN is not None:
gfpgan_defaults = {

View File

@ -57,7 +57,7 @@ fairscale==0.4.4
pycocotools==2.0.5
pycocoevalcap==1.2
regex
timm==0.4.12
timm==0.6.7
tqdm==4.64.0
tensorboard==2.10.1
@ -75,4 +75,9 @@ gfpgan==1.3.8 # GFPGAN
realesrgan==0.3.0 # RealESRGAN brings in GFPGAN as a requirement
-e git+https://github.com/devilismyfriend/latent-diffusion#egg=latent-diffusion #ldsr
## for monocular depth estimation
tensorflow==2.10.0
# Orphaned Packages: No usage found

View File

@ -0,0 +1,17 @@
# The horde url
horde_url = "https://stablehorde.net"
# Give a cool name to your instance
horde_name = "My Awesome Instance"
# The api_key identifies a unique user in the horde
# Visit https://stablehorde.net/register to create one before you can join
horde_api_key = "0000000000"
# Put other users whose prompts you want to prioritize.
# The owner's username is always included so you don't need to add it here,
# unless you want it to have lower priority than another user
horde_priority_usernames = []
# The amount of power your system can handle.
# 8 means 512*512. Each increase raises the possible resolution by 64 pixels.
# So if you put this to 2 (the minimum), your SD can only generate 64x64 pixels.
# If you put this to 32, it is equivalent to 1024x1024 pixels
horde_max_power = 8
# Set this to False if you do not want your worker to receive requests for NSFW generations
horde_nsfw = True

99
scripts/logger.py Normal file
View File

@ -0,0 +1,99 @@
import sys
from functools import partialmethod
from loguru import logger
# Level-name groups that route records to stdout instead of stderr.
STDOUT_LEVELS = ["GENERATION", "PROMPT"]
INIT_LEVELS = ["INIT", "INIT_OK", "INIT_WARN", "INIT_ERR"]
MESSAGE_LEVELS = ["MESSAGE"]

# By default we're at error level or higher
verbosity = 20
quiet = 0


def set_logger_verbosity(count):
    """Lower the verbosity threshold from a CLI-style ``-v`` counter.

    The count comes reversed: count = 0 means minimum verbosity while
    count = 5 means maximum verbosity.  So the more count we have, the
    lower we drop the verbosity threshold.
    """
    global verbosity
    verbosity = 20 - (count * 10)


def quiesce_logger(count):
    """Raise the quiet threshold: the bigger the count, the more silent the logger."""
    global quiet
    quiet = count * 10


def _clears_threshold(record):
    # Shared gate used by all filters: a record is emitted only when its
    # level number reaches the combined verbosity + quiet threshold.
    return record["level"].no >= verbosity + quiet


def is_stdout_log(record):
    """Loguru filter: GENERATION/PROMPT records destined for stdout."""
    if record["level"].name not in STDOUT_LEVELS:
        return False
    return _clears_threshold(record)


def is_init_log(record):
    """Loguru filter: INIT* status records destined for stdout."""
    if record["level"].name not in INIT_LEVELS:
        return False
    return _clears_threshold(record)


def is_msg_log(record):
    """Loguru filter: user-facing MESSAGE records destined for stdout."""
    if record["level"].name not in MESSAGE_LEVELS:
        return False
    return _clears_threshold(record)


def is_stderr_log(record):
    """Loguru filter: everything not claimed by one of the stdout filters."""
    if record["level"].name in STDOUT_LEVELS + INIT_LEVELS + MESSAGE_LEVELS:
        return False
    return _clears_threshold(record)
def test_logger():
    """Emit one record at every custom and builtin level, then exit.

    Manual debug utility: run it to eyeball that each level is routed to
    the correct sink with the correct colors.  Note that it terminates
    the process via sys.exit() and never returns.
    """
    # Three lines to exercise the multiline rendering of generation output.
    logger.generation("This is a generation message\nIt is typically multiline\nThree Lines".encode("unicode_escape").decode("utf-8"))
    logger.prompt("This is a prompt message")
    logger.debug("Debug Message")
    logger.info("Info Message")
    # Fixed copy-paste error: this previously logged "Info Warning".
    logger.warning("Warning Message")
    logger.error("Error Message")
    logger.critical("Critical Message")
    logger.init("This is an init message", status="Starting")
    logger.init_ok("This is an init message", status="OK")
    logger.init_warn("This is an init message", status="Warning")
    logger.init_err("This is an init message", status="Error")
    logger.message("This is user message")
    sys.exit()
# Loguru handler format strings: a verbose default for stderr, a leaner one
# for generation output, a status-column layout for INIT records, and a
# minimal one for user-facing messages.
logfmt = "<level>{level: <10}</level> @ <green>{time:YYYY-MM-DD HH:mm:ss}</green> | <green>{name}</green>:<green>{function}</green>:<green>{line}</green> - <level>{message}</level>"
genfmt = "<level>{level: <10}</level> @ <green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{message}</level>"
initfmt = "<magenta>INIT </magenta> | <level>{extra[status]: <10}</level> | <magenta>{message}</magenta>"
msgfmt = "<level>{level: <10}</level> | <level>{message}</level>"

# Register the custom levels.  The numeric `no` controls filtering against
# the verbosity/quiet threshold; GENERATION/PROMPT sit just above INFO (20),
# the INIT family just above WARNING (30).
logger.level("GENERATION", no=24, color="<cyan>")
logger.level("PROMPT", no=23, color="<yellow>")
logger.level("INIT", no=31, color="<white>")
logger.level("INIT_OK", no=31, color="<green>")
logger.level("INIT_WARN", no=31, color="<yellow>")
logger.level("INIT_ERR", no=31, color="<red>")
# Messages contain important information without which this application might not be able to be used
# As such, they have the highest priority
logger.level("MESSAGE", no=61, color="<green>")

# Graft convenience methods (logger.generation(...), logger.init_ok(...), ...)
# onto loguru's Logger class so call sites read like the builtin levels.
logger.__class__.generation = partialmethod(logger.__class__.log, "GENERATION")
logger.__class__.prompt = partialmethod(logger.__class__.log, "PROMPT")
logger.__class__.init = partialmethod(logger.__class__.log, "INIT")
logger.__class__.init_ok = partialmethod(logger.__class__.log, "INIT_OK")
logger.__class__.init_warn = partialmethod(logger.__class__.log, "INIT_WARN")
logger.__class__.init_err = partialmethod(logger.__class__.log, "INIT_ERR")
logger.__class__.message = partialmethod(logger.__class__.log, "MESSAGE")

# One stderr sink plus three stdout sinks; each sink's `filter` claims a
# disjoint set of levels so no record is printed twice.
config = {
    "handlers": [
        {"sink": sys.stderr, "format": logfmt, "colorize": True, "filter": is_stderr_log},
        {"sink": sys.stdout, "format": genfmt, "level": "PROMPT", "colorize": True, "filter": is_stdout_log},
        {"sink": sys.stdout, "format": initfmt, "level": "INIT", "colorize": True, "filter": is_init_log},
        {"sink": sys.stdout, "format": msgfmt, "level": "MESSAGE", "colorize": True, "filter": is_msg_log}
    ],
}
logger.configure(**config)

View File

@ -13,7 +13,7 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, time
import os, time, argparse
# USER CHANGABLE ARGUMENTS
@ -36,9 +36,13 @@ share = False
# Generate tiling images
tiling = False
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', '--horde_name', action="store", required=False, type=str, help="The server name for the Horde. It will be shown to the world and there can be only one.")
args = parser.parse_args()
# Enter other `--arguments` you wish to use - Must be entered as a `--argument ` syntax
additional_arguments = ""
if args.horde_name:
additional_arguments += f' --horde_name {args.horde_name}'

1687
scripts/scn2img.py Normal file

File diff suppressed because it is too large Load Diff

View File

@ -13,10 +13,11 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse, os, sys, glob, re
import argparse, os, sys, glob, re, requests, json, time
import cv2
from logger import logger, set_logger_verbosity, quiesce_logger
from perlin import perlinNoise
from frontend.frontend import draw_gradio_ui
from frontend.job_manager import JobManager, JobInfo
@ -44,6 +45,7 @@ parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not
parser.add_argument("--no-verify-input", action='store_true', help="do not verify input to check if it's too long", default=False)
parser.add_argument("--optimized-turbo", action='store_true', help="alternative optimization mode that does not save as much VRAM but runs siginificantly faster")
parser.add_argument("--optimized", action='store_true', help="load the model onto the device piecemeal instead of all at once to reduce VRAM usage at the cost of performance")
parser.add_argument("--outdir_scn2img", type=str, nargs="?", help="dir to write scn2img results to (overrides --outdir)", default=None)
parser.add_argument("--outdir_img2img", type=str, nargs="?", help="dir to write img2img results to (overrides --outdir)", default=None)
parser.add_argument("--outdir_imglab", type=str, nargs="?", help="dir to write imglab results to (overrides --outdir)", default=None)
parser.add_argument("--outdir_txt2img", type=str, nargs="?", help="dir to write txt2img results to (overrides --outdir)", default=None)
@ -61,6 +63,15 @@ parser.add_argument("--skip-save", action='store_true', help="do not save indivi
parser.add_argument('--no-job-manager', action='store_true', help="Don't use the experimental job manager on top of gradio", default=False)
parser.add_argument("--max-jobs", type=int, help="Maximum number of concurrent 'generate' commands", default=1)
parser.add_argument("--tiling", action='store_true', help="Generate tiling images", default=False)
parser.add_argument('-v', '--verbosity', action='count', default=0, help="The default logging level is ERROR or higher. This value increases the amount of logging seen in your screen")
parser.add_argument('-q', '--quiet', action='count', default=0, help="The default logging level is ERROR or higher. This value decreases the amount of logging seen in your screen")
parser.add_argument("--bridge", action='store_true', help="don't launch web server, but make this instance into a Horde bridge.", default=False)
parser.add_argument('--horde_api_key', action="store", required=False, type=str, help="The API key corresponding to the owner of this Horde instance")
parser.add_argument('--horde_name', action="store", required=False, type=str, help="The server name for the Horde. It will be shown to the world and there can be only one.")
parser.add_argument('--horde_url', action="store", required=False, type=str, help="The SH Horde URL. Where the bridge will pickup prompts and send the finished generations.")
parser.add_argument('--horde_priority_usernames',type=str, action='append', required=False, help="Usernames which get priority use in this horde instance. The owner's username is always in this list.")
parser.add_argument('--horde_max_power',type=int, required=False, help="How much power this instance has to generate pictures. Min: 2")
parser.add_argument('--horde_nsfw', action='store_true', required=False, help="Set to false if you do not want this worker generating NSFW images.")
opt = parser.parse_args()
#Should not be needed anymore
@ -749,6 +760,9 @@ def get_next_sequence_number(path, prefix=''):
The sequence starts at 0.
"""
# Because when running in bridge-mode, we do not have a dir
if opt.bridge:
return(0)
result = -1
for p in Path(path).iterdir():
if p.name.endswith(('.png', '.jpg')) and p.name.startswith(prefix):
@ -933,10 +947,12 @@ def process_images(
if hasattr(model, "embedding_manager"):
load_embeddings(fp)
os.makedirs(outpath, exist_ok=True)
if not opt.bridge:
os.makedirs(outpath, exist_ok=True)
sample_path = os.path.join(outpath, "samples")
os.makedirs(sample_path, exist_ok=True)
if not opt.bridge:
os.makedirs(sample_path, exist_ok=True)
if not ("|" in prompt) and prompt.startswith("@"):
prompt = prompt[1:]
@ -1176,7 +1192,8 @@ def process_images(
if sort_samples:
sanitized_prompt = sanitized_prompt[:128] #200 is too long
sample_path_i = os.path.join(sample_path, sanitized_prompt)
os.makedirs(sample_path_i, exist_ok=True)
if not opt.bridge:
os.makedirs(sample_path_i, exist_ok=True)
base_count = get_next_sequence_number(sample_path_i)
filename = opt.filename_format or "[STEPS]_[SAMPLER]_[SEED]_[VARIANT_AMOUNT]"
else:
@ -1364,9 +1381,23 @@ Peak memory usage: { -(mem_max_used // -1_048_576) } MiB / { -(mem_total // -1_0
return output_images, seed, info, stats
def txt2img(prompt: str, ddim_steps: int, sampler_name: str, toggles: List[int], realesrgan_model_name: str,
ddim_eta: float, n_iter: int, batch_size: int, cfg_scale: float, seed: Union[int, str, None],
height: int, width: int, fp, variant_amount: float = None, variant_seed: int = None, job_info: JobInfo = None):
def txt2img(
prompt: str,
ddim_steps: int = 50,
sampler_name: str = 'k_lms',
toggles: List[int] = [1, 4],
realesrgan_model_name: str = '',
ddim_eta: float = 0.0,
n_iter: int = 1,
batch_size: int = 1,
cfg_scale: float = 5.0,
seed: Union[int, str, None] = None,
height: int = 512,
width: int = 512,
fp = None,
variant_amount: float = 0.0,
variant_seed: int = None,
job_info: JobInfo = None):
outpath = opt.outdir_txt2img or opt.outdir or "outputs/txt2img-samples"
err = False
seed = seed_to_int(seed)
@ -2384,6 +2415,7 @@ txt2img_defaults = {
'variant_amount': 0.0,
'variant_seed': '',
'submit_on_enter': 'Yes',
'realesrgan_model_name': 'RealESRGAN_x4plus',
}
if 'txt2img' in user_defaults:
@ -2458,6 +2490,9 @@ img2img_defaults = {
'height': 512,
'width': 512,
'fp': None,
'mask_blur_strength': 1,
'realesrgan_model_name': 'RealESRGAN_x4plus',
'image_editor_mode': 'Mask'
}
if 'img2img' in user_defaults:
@ -2466,6 +2501,34 @@ if 'img2img' in user_defaults:
img2img_toggle_defaults = [img2img_toggles[i] for i in img2img_defaults['toggles']]
img2img_image_mode = 'sketch'
from scn2img import get_scn2img, scn2img_define_args
# avoid circular import, by passing all necessary types, functions
# and variables to get_scn2img, which will return scn2img function.
scn2img = get_scn2img(
MemUsageMonitor, save_sample, get_next_sequence_number, seed_to_int,
txt2img, txt2img_defaults, img2img, img2img_defaults,
opt
)
scn2img_toggles = [
'Clear Cache',
'Output intermediate images',
'Save individual images',
'Write sample info files',
'Write sample info to one file',
'jpg samples',
]
scn2img_defaults = {
'prompt': '',
'seed': '',
'toggles': [1, 2, 3]
}
if 'scn2img' in user_defaults:
scn2img_defaults.update(user_defaults['scn2img'])
scn2img_toggle_defaults = [scn2img_toggles[i] for i in scn2img_defaults['toggles']]
help_text = """
## Mask/Crop
* The masking/cropping is very temperamental.
@ -2494,6 +2557,7 @@ demo = draw_gradio_ui(opt,
txt2img=txt2img,
img2img=img2img,
imgproc=imgproc,
scn2img=scn2img,
txt2img_defaults=txt2img_defaults,
txt2img_toggles=txt2img_toggles,
txt2img_toggle_defaults=txt2img_toggle_defaults,
@ -2506,6 +2570,10 @@ demo = draw_gradio_ui(opt,
sample_img2img=sample_img2img,
imgproc_defaults=imgproc_defaults,
imgproc_mode_toggles=imgproc_mode_toggles,
scn2img_defaults=scn2img_defaults,
scn2img_toggles=scn2img_toggles,
scn2img_toggle_defaults=scn2img_toggle_defaults,
scn2img_define_args=scn2img_define_args,
RealESRGAN=RealESRGAN,
GFPGAN=GFPGAN,
LDSR=LDSR,
@ -2580,8 +2648,150 @@ def run_headless():
print(stats)
print()
@logger.catch
def run_bridge(interval, api_key, horde_name, horde_url, priority_usernames, horde_max_pixels, horde_nsfw):
    """Main loop of the Stable Horde bridge worker.

    Repeatedly pops a generation request from the horde, renders it with
    txt2img() and submits the result, sleeping ``interval`` seconds between
    iterations.  Never returns; intended to be stopped with Ctrl-C.
    """
    current_id = None
    current_payload = None
    # NOTE(review): incremented but never checked — a retry limit is not implemented.
    loop_retry = 0
    while True:
        gen_dict = {
            "name": horde_name,
            "max_pixels": horde_max_pixels,
            "priority_usernames": priority_usernames,
            "nsfw": horde_nsfw,
        }
        headers = {"apikey": api_key}
        if current_id:
            loop_retry += 1
        else:
            try:
                pop_req = requests.post(horde_url + '/api/v2/generate/pop', json = gen_dict, headers = headers)
            except requests.exceptions.ConnectionError:
                logger.warning(f"Server {horde_url} unavailable during pop. Waiting 10 seconds...")
                time.sleep(10)
                continue
            # BUG FIX: this previously read `except requests.exceptions.JSONDecodeError():`
            # — calling the class in an except clause makes the handler raise
            # TypeError instead of matching the exception.
            except requests.exceptions.JSONDecodeError:
                logger.warning(f"Server {horde_url} unavailable during pop. Waiting 10 seconds...")
                time.sleep(10)
                continue
            try:
                pop = pop_req.json()
            except json.decoder.JSONDecodeError:
                logger.error(f"Could not decode response from {horde_url} as json. Please inform its administrator!")
                time.sleep(interval)
                continue
            if pop is None:
                logger.error(f"Something has gone wrong with {horde_url}. Please inform its administrator!")
                time.sleep(interval)
                continue
            if not pop_req.ok:
                message = pop['message']
                logger.warning(f"During gen pop, server {horde_url} responded with status code {pop_req.status_code}: {message}. Waiting for 10 seconds...")
                if 'errors' in pop:
                    logger.warning(f"Detailed Request Errors: {pop['errors']}")
                time.sleep(10)
                continue
            if not pop.get("id"):
                skipped_info = pop.get('skipped')
                if skipped_info and len(skipped_info):
                    skipped_info = f" Skipped Info: {skipped_info}."
                else:
                    skipped_info = ''
                logger.debug(f"Server {horde_url} has no valid generations to do for us.{skipped_info}")
                time.sleep(interval)
                continue
            current_id = pop['id']
            logger.debug(f"Request with id {current_id} picked up. Initiating work...")
            current_payload = pop['payload']
            if 'toggles' in current_payload and current_payload['toggles'] is None:
                logger.error(f"Received Bad payload: {pop}")
                current_id = None
                current_payload = None
                current_generation = None
                time.sleep(10)
                continue
        images, seed, info, stats = txt2img(**current_payload)
        buffer = BytesIO()
        # We send as WebP to avoid using all the horde bandwidth
        images[0].save(buffer, format="WebP", quality=90)
        submit_dict = {
            "id": current_id,
            "generation": base64.b64encode(buffer.getvalue()).decode("utf8"),
            "api_key": api_key,
            "seed": seed,
            "max_pixels": horde_max_pixels,
        }
        current_generation = seed
        # Keep retrying the submit until the server acknowledges (or 404s) it.
        while current_id and current_generation:
            try:
                submit_req = requests.post(horde_url + '/api/v2/generate/submit', json = submit_dict, headers = headers)
                try:
                    submit = submit_req.json()
                except json.decoder.JSONDecodeError:
                    logger.error(f"Something has gone wrong with {horde_url} during submit. Please inform its administrator!")
                    time.sleep(interval)
                    continue
                if submit_req.status_code == 404:
                    logger.warning("The generation we were working on got stale. Aborting!")
                elif not submit_req.ok:
                    logger.warning(f"During gen submit, server {horde_url} responded with status code {submit_req.status_code}: {submit['message']}. Waiting for 10 seconds...")
                    if 'errors' in submit:
                        logger.warning(f"Detailed Request Errors: {submit['errors']}")
                    time.sleep(10)
                    continue
                else:
                    # Reuse the already-parsed body instead of re-parsing with submit_req.json().
                    logger.info(f'Submitted generation with id {current_id} and contributed for {submit["reward"]}')
                current_id = None
                current_payload = None
                current_generation = None
            except requests.exceptions.ConnectionError:
                logger.warning(f"Server {horde_url} unavailable during submit. Waiting 10 seconds...")
                time.sleep(10)
                continue
        time.sleep(interval)
if __name__ == '__main__':
    # Entry point: plain server launch, headless CLI mode, or Horde bridge mode.
    if opt.cli is None:
        launch_server()
    else:
        set_logger_verbosity(opt.verbosity)
        quiesce_logger(opt.quiet)
        if opt.cli:
            run_headless()
        if opt.bridge:
            try:
                import bridgeData as cd
            # Was a bare `except:`; keep the best-effort fallback but let
            # KeyboardInterrupt/SystemExit propagate.
            except Exception:
                logger.warning("No bridgeData found, use default where no CLI args are set")

                class temp(object):
                    """Stand-in for scripts/bridgeData.py carrying the documented defaults."""
                    def __init__(self):
                        random.seed()
                        self.horde_url = "https://stablehorde.net"
                        # Give a cool name to your instance
                        self.horde_name = f"Automated Instance #{random.randint(-100000000, 100000000)}"
                        # The api_key identifies a unique user in the horde
                        self.horde_api_key = "0000000000"
                        # Put other users whose prompts you want to prioritize.
                        # The owner's username is always included so you don't need to add it here,
                        # unless you want it to have lower priority than another user
                        self.horde_priority_usernames = []
                        self.horde_max_power = 8
                        # BUG FIX: was `self.nsfw`, but the reader below expects `horde_nsfw`,
                        # which silently fell through to the AttributeError default.
                        self.horde_nsfw = True
                cd = temp()
            # CLI arguments override whatever bridgeData provides.
            horde_api_key = opt.horde_api_key if opt.horde_api_key else cd.horde_api_key
            horde_name = opt.horde_name if opt.horde_name else cd.horde_name
            horde_url = opt.horde_url if opt.horde_url else cd.horde_url
            horde_priority_usernames = opt.horde_priority_usernames if opt.horde_priority_usernames else cd.horde_priority_usernames
            horde_max_power = opt.horde_max_power if opt.horde_max_power else cd.horde_max_power
            try:
                horde_nsfw = opt.horde_nsfw if opt.horde_nsfw else cd.horde_nsfw
            except AttributeError:
                # Older user bridgeData files may not define horde_nsfw at all.
                horde_nsfw = True
            if horde_max_power < 2:
                horde_max_power = 2
            # 8 "power" corresponds to 512x512; each unit is one 64x64 tile.
            horde_max_pixels = 64*64*8*horde_max_power
            # NOTE(review): this logs the API key in full — consider masking it.
            logger.info(f"Joining Horde with parameters: API Key '{horde_api_key}'. Server Name '{horde_name}'. Horde URL '{horde_url}'. Max Pixels {horde_max_pixels}")
            try:
                run_bridge(1, horde_api_key, horde_name, horde_url, horde_priority_usernames, horde_max_pixels, horde_nsfw)
            except KeyboardInterrupt:
                logger.info("Keyboard Interrupt Received. Ending Bridge")
        else:
            launch_server()

View File

@ -97,7 +97,7 @@ call "%v_conda_path%\Scripts\activate.bat" "%v_conda_env_name%"
set SETUPTOOLS_USE_DISTUTILS=stdlib
IF EXIST "models\ldm\stable-diffusion-v1\model.ckpt" (
set "PYTHONPATH=%~dp0"
python scripts\relauncher.py
python scripts\relauncher.py %*
) ELSE (
echo Your model file does not exist! Place it in 'models\ldm\stable-diffusion-v1' with the name 'model.ckpt'.
pause

View File

@ -162,7 +162,7 @@ launch_webui () {
select yn in "Streamlit" "Gradio"; do
case $yn in
Streamlit ) printf "\nStarting Stable Diffusion WebUI: Streamlit Interface. Please Wait...\n"; python -m streamlit run scripts/webui_streamlit.py; break;;
Gradio ) printf "\nStarting Stable Diffusion WebUI: Gradio Interface. Please Wait...\n"; python scripts/relauncher.py; break;;
Gradio ) printf "\nStarting Stable Diffusion WebUI: Gradio Interface. Please Wait...\n"; python scripts/relauncher.py $@; break;;
esac
done
}