import math
import os
import sys
import traceback
import torch
import numpy as np
from torch import einsum
from torch.nn.functional import silu

import modules.textual_inversion.textual_inversion
from modules import prompt_parser, devices, sd_hijack_optimizations, shared
from modules.shared import cmd_opts
from modules import sd_hijack_clip, sd_hijack_open_clip

from modules.sd_hijack_optimizations import invokeAI_mps_available

import ldm.modules.attention
import ldm.modules.diffusionmodules.model
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms
import ldm.modules.encoders.modules
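
# originals of the ldm functions replaced below; undo_optimizations() restores them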
attention_CrossAttention_forward = ldm.modules.attention.CrossAttention.forward
diffusionmodules_model_nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity
diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward

# new memory efficient cross attention blocks do not support hypernets and we already
# have memory efficient cross attention anyway, so this disables SD2.0's memory efficient cross attention
ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.CrossAttention
ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention

# silence new console spam from SD2
ldm.modules.attention.print = lambda *args: None
ldm.modules.diffusionmodules.model.print = lambda *args: None


def apply_optimizations():
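    """Replace ldm's cross attention and nonlinearity with optimized implementations, chosen from command line options and available hardware."""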
    undo_optimizations()

    ldm.modules.diffusionmodules.model.nonlinearity = silu

    if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)):
        print("Applying xformers cross attention optimization.")
        ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
        ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
    elif cmd_opts.opt_split_attention_v1:
        print("Applying v1 cross attention optimization.")
        ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
    elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention_invokeai or not torch.cuda.is_available()):
        if not invokeAI_mps_available and shared.device.type == 'mps':
            print("The InvokeAI cross attention optimization for MPS requires the psutil package which is not installed.")
            print("Applying v1 cross attention optimization.")
            ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
        else:
            print("Applying cross attention optimization (InvokeAI).")
            ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI
    elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
        print("Applying cross attention optimization (Doggettx).")
        ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
        ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward


def undo_optimizations():
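    """Restore the original ldm functions saved at module load time."""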
    ldm.modules.attention.CrossAttention.forward = attention_CrossAttention_forward  # this stops hypernets from working
    ldm.modules.diffusionmodules.model.nonlinearity = diffusionmodules_model_nonlinearity
    ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward


class StableDiffusionModelHijack:
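    """Wraps the loaded Stable Diffusion model: hooks its text encoder so prompts can use textual inversion embeddings, and applies the attention optimizations."""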
    fixes = None
    comments = []
    layers = None
    circular_enabled = False
    clip = None

    embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase(cmd_opts.embeddings_dir)

    def hijack(self, m):
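        # wrap the text encoder (CLIP for SD1.x, OpenCLIP for SD2.x) and its token embedding
        # layer, then apply the optimizations and record the model's layers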
        if type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder:
            model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
            model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self)
            m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
        elif type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder:
            m.cond_stage_model.model.token_embedding = EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self)
            m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)

        self.clip = m.cond_stage_model

        apply_optimizations()

        def flatten(el):
            flattened = [flatten(children) for children in el.children()]
            res = [el]
            for c in flattened:
                res += c
            return res

        self.layers = flatten(m)

    def undo_hijack(self, m):
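        # unwrap the text encoder and token embeddings, reversing what hijack() did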
        if type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords:
            m.cond_stage_model = m.cond_stage_model.wrapped

            model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
            if type(model_embeddings.token_embedding) == EmbeddingsWithFixes:
                model_embeddings.token_embedding = model_embeddings.token_embedding.wrapped
        elif type(m.cond_stage_model) == sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords:
            m.cond_stage_model.wrapped.model.token_embedding = m.cond_stage_model.wrapped.model.token_embedding.wrapped
            m.cond_stage_model = m.cond_stage_model.wrapped

        self.apply_circular(False)
        self.layers = None
        self.clip = None

    def apply_circular(self, enable):
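        # toggle circular (wrap-around) padding on every Conv2d so generated images tile seamlessly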
        if self.circular_enabled == enable:
            return

        self.circular_enabled = enable

        for layer in [layer for layer in self.layers if type(layer) == torch.nn.Conv2d]:
            layer.padding_mode = 'circular' if enable else 'zeros'

    def clear_comments(self):
        self.comments = []

    def tokenize(self, text):
        _, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
        return remade_batch_tokens[0], token_count, sd_hijack_clip.get_target_prompt_token_count(token_count)


class EmbeddingsWithFixes(torch.nn.Module):
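    """Wraps a token embedding layer and splices textual inversion vectors into its output at the offsets recorded in embeddings.fixes."""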
    def __init__(self, wrapped, embeddings):
        super().__init__()
        self.wrapped = wrapped
        self.embeddings = embeddings

    def forward(self, input_ids):
        batch_fixes = self.embeddings.fixes
        self.embeddings.fixes = None

        inputs_embeds = self.wrapped(input_ids)

        if batch_fixes is None or len(batch_fixes) == 0 or max([len(x) for x in batch_fixes]) == 0:
            return inputs_embeds

        vecs = []
        for fixes, tensor in zip(batch_fixes, inputs_embeds):
            for offset, embedding in fixes:
                emb = embedding.vec
                emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0])
                tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]])

            vecs.append(tensor)

        return torch.stack(vecs)


def add_circular_option_to_conv_2d():
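    # monkey-patch torch.nn.Conv2d so every instance created after this call uses circular padding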
    conv2d_constructor = torch.nn.Conv2d.__init__

    def conv2d_constructor_circular(self, *args, **kwargs):
        return conv2d_constructor(self, *args, padding_mode='circular', **kwargs)

    torch.nn.Conv2d.__init__ = conv2d_constructor_circular
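
# module-level instance used by the rest of the webui to hijack and un-hijack the loaded model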
model_hijack = StableDiffusionModelHijack()


def register_buffer(self, name, attr):
    """
    Fix register buffer bug for Mac OS.
    """

    if type(attr) == torch.Tensor:
        if attr.device != devices.device:
            if devices.has_mps():
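                # MPS does not support float64, so cast the buffer to float32 while moving it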
                attr = attr.to(device="mps", dtype=torch.float32)
            else:
                attr = attr.to(devices.device)

    setattr(self, name, attr)


ldm.models.diffusion.ddim.DDIMSampler.register_buffer = register_buffer
ldm.models.diffusion.plms.PLMSSampler.register_buffer = register_buffer