Revert "Run model on cpu if there is only cpu (#399)"
This reverts commit 277cc46646
.
This commit is contained in:
parent
66dac9bcf9
commit
e1e1d50a94
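
For context, the change being reverted (#399) chose the target device at model-load time instead of assuming CUDA. Below is a minimal sketch of that pattern, reconstructed from the hunks that follow; the module-level `device` is an assumption based on how webui.py uses it, and `place_model` is a hypothetical helper name.

import torch

# Assumed module-level device, as referenced by the removed lines below.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def place_model(model):  # hypothetical helper name
    # Pattern removed by this revert: fall back to CPU when no GPU is present.
    if device.type == "cpu":
        model.cpu()
    else:
        model.cuda()
    model.eval()
    return model

After the revert, load_model_from_config() and load_SD_model() call model.cuda() unconditionally, so CPU-only hosts will fail at model load.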
webui.py (20 lines changed: 5 additions, 15 deletions)
@@ -143,10 +143,7 @@ def load_model_from_config(config, ckpt, verbose=False):
         print("unexpected keys:")
         print(u)
 
-    if device.type == "cpu":
-        model.cpu()
-    else:
-        model.cuda()
+    model.cuda()
     model.eval()
     return model
 
@@ -272,10 +269,8 @@ def create_random_tensors(shape, seeds):
     return x
 
 def torch_gc():
-    if torch.cuda.is_available():
-        torch.cuda.empty_cache()
-        torch.cuda.ipc_collect()
-
+    torch.cuda.empty_cache()
+    torch.cuda.ipc_collect()
 def load_LDSR():
     model_name = 'model'
     yaml_name = 'project'
@@ -290,7 +285,6 @@ def load_LDSR():
     from LDSR import LDSR
     LDSRObject = LDSR(model_path, yaml_path)
     return LDSRObject
-
 def load_GFPGAN():
     model_name = 'GFPGANv1.3'
     model_path = os.path.join(GFPGAN_dir, 'experiments/pretrained_models', model_name + '.pth')
@@ -394,10 +388,7 @@ def load_SD_model():
 
     model = instantiate_from_config(config.modelUNet)
     _, _ = model.load_state_dict(sd, strict=False)
-    if device.type == "cpu":
-        model.cpu()
-    else:
-        model.cuda()
+    model.cuda()
     model.eval()
     model.turbo = opt.optimized_turbo
 
@@ -410,7 +401,6 @@ def load_SD_model():
     _, _ = modelFS.load_state_dict(sd, strict=False)
     modelFS.eval()
 
-
     del sd
 
     if not opt.no_half:
@@ -795,7 +785,7 @@ def process_images(
     output_images = []
     grid_captions = []
     stats = []
-    with torch.no_grad(), precision_scope("cuda") if torch.cuda.is_available() else precision_scope("cpu"), (model.ema_scope() if not opt.optimized else nullcontext()):
+    with torch.no_grad(), precision_scope("cuda"), (model.ema_scope() if not opt.optimized else nullcontext()):
         init_data = func_init()
         tic = time.time()
 