mirror of
https://github.com/sd-webui/stable-diffusion-webui.git
synced 2024-12-15 07:12:58 +03:00
33b896d0cb
# Summary of the change - new Scene-to-Image tab - new scn2img function - functions for loading and running monocular_depth_estimation with tensorflow # Description (relevant motivation, which issue is fixed) Related to discussion #925 > Would it be possible to have a layers system where we could do have foreground, mid, and background objects which relate to one another and share the style? So we could say generate a landscape, one another layer generate a castle, and on another layer generate a crowd of people. To make this work I made a prompt-based layering system in a new "Scene-to-Image" tab. You write a multi-line prompt that looks like markdown, where each section declares one layer. It is hierarchical, so each layer can have its own child layers. Examples: https://imgur.com/a/eUxd5qn ![](https://i.imgur.com/L61w00Q.png) In the frontend you can find a brief documentation for the syntax, examples and reference for the various arguments. Here is a short summary: Sections with "prompt" and child layers are img2img, without child layers they are txt2img. Without "prompt" they are just images, useful for mask selection, image composition, etc. Images can be initialized with "color", resized with "resize" and their position specified with "pos". Rotation and rotation center are "rotation" and "center". Masks can automatically be selected by color or by estimated depth based on https://huggingface.co/spaces/atsantiago/Monocular_Depth_Filter. ![](https://i.imgur.com/8rMHWmZ.png) # Additional dependencies that are required for this change For mask selection by monocular depth estimation tensorflow is required and the model must be cloned to ./src/monocular_depth_estimation/ Changes in environment.yaml: - einops>=0.3.0 - tensorflow>=2.10.0 Einops must be allowed to be newer for tensorflow to work. 
# Checklist: - [x] I have changed the base branch to `dev` - [x] I have performed a self-review of my own code - [x] I have commented my code in hard-to-understand areas - [x] I have made corresponding changes to the documentation Co-authored-by: hlky <106811348+hlky@users.noreply.github.com>
41 lines
1.0 KiB
Markdown
41 lines
1.0 KiB
Markdown
// blend it together and finish it with details
|
|
prompt: cute happy orange cat sitting at beach, beach in background, trending on artstation:1 cute happy cat:1
|
|
sampler_name:k_euler_a
|
|
ddim_steps: 35
|
|
denoising_strength: 0.55
|
|
variation: 3
|
|
initial_seed: 1
|
|
|
|
# put foreground onto background
|
|
size: 512, 512
|
|
color: 0,0,0
|
|
|
|
## create foreground
|
|
size:512,512
|
|
color:0,0,0,0
|
|
resize: 300, 300
|
|
pos: 256, 350
|
|
|
|
// select mask by probing some pixels from the image
|
|
mask_by_color_at: 15, 15, 15, 256, 85, 465, 100, 480
|
|
mask_by_color_threshold:80
|
|
mask_by_color_space: HLS
|
|
|
|
// some pixels inside the cat may be selected, remove them with mask_open
|
|
mask_open: 15
|
|
|
|
// there are still some background pixels left at the edge between cat and background
|
|
// grow the mask to get them as well
|
|
mask_grow: 15
|
|
|
|
// we want to remove whatever is masked:
|
|
mask_invert: True
|
|
|
|
####
|
|
prompt: cute happy orange cat, white background
|
|
ddim_steps: 25
|
|
variation: 1
|
|
|
|
## create background
|
|
prompt:beach landscape, beach with ocean in background, photographic, beautiful:1 red:-0.4
|