readme extras for VRAM
added missing packages to requirements
for #74: add support for negative numbers in X/Y plot (plus ranges)
for #73: changed progressbar to work properly with custom modes

parent beece7d85c
commit 395f170597

README.md (26 lines changed)

```diff
@@ -149,9 +149,9 @@ Open the URL in browser, and you are good to go.
 ### What options to use for low VRAM videocards?
 - If you have 4GB VRAM and want to make 512x512 (or maybe up to 640x640) images, use `--medvram`.
 - If you have 4GB VRAM and want to make 512x512 images, but you get an out of memory error with `--medvram`, use `--medvram --opt-split-attention` instead.
-- If you have 4GB VRAM and want to make 512x512 images, and you still get an out of memory error, use `--lowvram --always-batch-cond-uncond` instead.
-- If you have 4GB VRAM and want to make images larger than you can with `--medvram`, use `--lowvram`.
-- If you have more VRAM and want to make larger images than you can usually make, use `--medvram`. You can use `--lowvram`
+- If you have 4GB VRAM and want to make 512x512 images, and you still get an out of memory error, use `--lowvram --always-batch-cond-uncond --opt-split-attention` instead.
+- If you have 4GB VRAM and want to make images larger than you can with `--medvram`, use `--lowvram --opt-split-attention`.
+- If you have more VRAM and want to make larger images than you can usually make, use `--medvram --opt-split-attention`. You can use `--lowvram`
 also but the effect will likely be barely noticeable.
 - Otherwise, do not use any of those.
 
```
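
As a concrete note on where these flags go: they are command-line arguments to the launcher, e.g. `python webui.py --medvram --opt-split-attention` for a 4GB card (assuming you start `webui.py` directly; adjust for however you normally launch the UI).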

````diff
@@ -159,6 +159,26 @@ Extra: if you get a green screen instead of generated pictures, you have a card
 precision floating point numbers. You must use `--precision full --no-half` in addition to other flags,
 and the model will take much more space in VRAM.
 
+### How to change UI defaults?
+
+After running once, a `ui-config.json` file appears in webui directory:
+
+```json
+{
+    "txt2img/Sampling Steps/value": 20,
+    "txt2img/Sampling Steps/minimum": 1,
+    "txt2img/Sampling Steps/maximum": 150,
+    "txt2img/Sampling Steps/step": 1,
+    "txt2img/Batch count/value": 1,
+    "txt2img/Batch count/minimum": 1,
+    "txt2img/Batch count/maximum": 32,
+    "txt2img/Batch count/step": 1,
+    "txt2img/Batch size/value": 1,
+    "txt2img/Batch size/minimum": 1,
+```
+
+Edit values to your liking and the next time you launch the program they will be applied.
+
 ## Credits
 - Stable Diffusion - https://github.com/CompVis/stable-diffusion, https://github.com/CompVis/taming-transformers
 - k-diffusion - https://github.com/crowsonkb/k-diffusion.git
````
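
An aside on the new README section: the keys in `ui-config.json` are flat `tab/control/property` strings rather than nested objects, so a quick way to sanity-check an edit is to load the file and print a value. A minimal illustrative sketch (not code from this repo; the filename is the one the README names):

```python
import json

# Load the defaults file that the web UI writes on first run.
with open("ui-config.json", encoding="utf8") as file:
    ui_defaults = json.load(file)

# Keys are flat "tab/control/property" strings, not nested objects.
print(ui_defaults["txt2img/Sampling Steps/value"])   # 20 by default
print(ui_defaults["txt2img/Batch count/maximum"])    # 32 by default
```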

```diff
@@ -153,6 +153,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
     with torch.no_grad(), precision_scope("cuda"), ema_scope():
         p.init()
 
-        state.job_count = p.n_iter
+        if state.job_count == -1:
+            state.job_count = p.n_iter
 
         for n in range(p.n_iter):
```
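
This one-line guard pairs with the `webui.py` change at the end of this commit, where `wrap_gradio_gpu_call` now seeds `shared.state.job_count` with `-1` instead of `1`. The idea: `-1` means "no one has announced a job count yet", so a custom script can set its own total before `process_images` runs, and `process_images` only falls back to `p.n_iter` if the count is still unset. A self-contained sketch of the pattern (hypothetical standalone code, not the actual module):

```python
UNSET = -1  # sentinel: no job count announced yet

class State:
    job_count = UNSET
    job_no = 0

state = State()

def process_images(n_iter):
    # Fall back to this call's iteration count only if no
    # custom script has already announced a larger total.
    if state.job_count == UNSET:
        state.job_count = n_iter
    for _ in range(n_iter):
        state.job_no += 1

state.job_count = 6   # e.g. an X/Y plot script announcing 6 grid cells
process_images(2)     # the progressbar tracks 2/6, not 2/2
print(state.job_no, "/", state.job_count)  # -> 2 / 6
```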

```diff
@@ -54,6 +54,7 @@ class State:
         self.job_no += 1
         self.sampling_step = 0
 
+
 state = State()
 
 artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
```

```diff
@@ -140,7 +140,10 @@ def check_progress_call():
     if shared.state.job_count == 0:
         return ""
 
-    progress = shared.state.job_no / shared.state.job_count
+    progress = 0
+
+    if shared.state.job_count > 0:
+        progress += shared.state.job_no / shared.state.job_count
     if shared.state.sampling_steps > 0:
         progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
 
```
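
The rewritten computation no longer divides blindly: now that the wrapper seeds `job_count` with `-1`, the old unconditional division would report a negative "progress" whenever no real count was ever set, so the job fraction is added only when `job_count` is positive. A quick worked example of the new formula (values picked for illustration):

```python
# Second of four queued jobs, halfway through its sampling steps.
job_no, job_count = 1, 4
sampling_step, sampling_steps = 10, 20

progress = 0
if job_count > 0:
    progress += job_no / job_count                               # 0.25 for finished jobs
if sampling_steps > 0:
    progress += 1 / job_count * sampling_step / sampling_steps   # 0.125 within the current job
print(progress)  # -> 0.375
```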

```diff
@@ -8,3 +8,7 @@ torch
 transformers
 omegaconf
 pytorch_lightning
+diffusers
+invisible-watermark
+git+https://github.com/crowsonkb/k-diffusion.git
+git+https://github.com/TencentARC/GFPGAN.git
```
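
With these added, a fresh environment should pick everything up from a plain `pip install -r` on this requirements file; the two `git+` entries make pip install k-diffusion and GFPGAN straight from GitHub.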

```diff
@@ -9,6 +9,7 @@ from modules import images
 from modules.processing import process_images, Processed
 from modules.shared import opts, cmd_opts, state
 import modules.sd_samplers
+import re
 
 
 def apply_field(field):
@@ -89,6 +90,8 @@ def draw_xy_grid(xs, ys, x_label, y_label, cell):
     return first_pocessed
 
 
+re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
+
 class Script(scripts.Script):
     def title(self):
         return "X/Y plot"
@@ -118,11 +121,13 @@ class Script(scripts.Script):
         valslist_ext = []
 
         for val in valslist:
-            if "-" in val:
-                s = val.split("-")
-                start = int(s[0])
-                end = int(s[1])+1
-                step = 1 if len(s) < 3 else int(s[2])
+            m = re_range.fullmatch(val)
+            if m is not None:
+
+                start = int(m.group(1))
+                end = int(m.group(2))+1
+                step = int(m.group(3)) if m.group(3) is not None else 1
+
                 valslist_ext += list(range(start, end, step))
             else:
                 valslist_ext.append(val)
```
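
To make the new range syntax concrete, here is a standalone demonstration of `re_range` and the parsing logic from the hunk above (the sample inputs are illustrative):

```python
import re

re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")

for val in ["1-5", "-3-3", "1-10 (+2)", "plain text"]:
    m = re_range.fullmatch(val)
    if m is not None:
        start = int(m.group(1))
        end = int(m.group(2)) + 1
        step = int(m.group(3)) if m.group(3) is not None else 1
        print(f"{val!r} -> {list(range(start, end, step))}")
    else:
        print(f"{val!r} -> kept as a literal value")

# '1-5'        -> [1, 2, 3, 4, 5]
# '-3-3'       -> [-3, -2, -1, 0, 1, 2, 3]
# '1-10 (+2)'  -> [1, 3, 5, 7, 9]
# 'plain text' -> kept as a literal value
```

The old `val.split("-")` approach could never express a negative bound, because a leading minus sign was consumed as a separator; the regex lets each bound carry its own sign and moves the optional step into parentheses.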

webui.py (2 lines changed)

```diff
@@ -123,7 +123,7 @@ queue_lock = threading.Lock()
 def wrap_gradio_gpu_call(func):
     def f(*args, **kwargs):
         shared.state.sampling_step = 0
-        shared.state.job_count = 1
+        shared.state.job_count = -1
         shared.state.job_no = 0
 
 
```
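
Taken together: the wrapper now starts every job with `job_count = -1` ("unknown"), `process_images` fills the count in only when nothing else has, and `check_progress_call` skips the job fraction while the count is still unknown — which is what makes the progressbar behave for custom scripts like the X/Y plot.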