Hugging Face Space status: Runtime error

Commit: Update MagicQuill/comfy/sd.py (+9 −2)
Changed file: MagicQuill/comfy/sd.py (CHANGED)
|
@@ -465,10 +465,17 @@ def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DI
|
|
| 465 |
def load_gligen(ckpt_path):
|
| 466 |
data = comfy.utils.load_torch_file(ckpt_path, safe_load=True)
|
| 467 |
model = gligen.load_gligen(data)
|
| 468 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 469 |
model = model.half()
|
| 470 |
-
|
|
|
|
| 471 |
|
|
|
|
|
|
|
| 472 |
def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
|
| 473 |
logging.warning("Warning: The load checkpoint with config function is deprecated and will eventually be removed, please use the other one.")
|
| 474 |
model, clip, vae, _ = load_checkpoint_guess_config(ckpt_path, output_vae=output_vae, output_clip=output_clip, output_clipvision=False, embedding_directory=embedding_directory, output_model=True)
|
|
|
|
def load_gligen(ckpt_path):
    """Load a GLIGEN checkpoint and wrap it in a ModelPatcher.

    Args:
        ckpt_path: Filesystem path to the GLIGEN checkpoint file.

    Returns:
        A ``comfy.model_patcher.ModelPatcher`` wrapping the loaded model,
        placed on the active torch device in fp16 (CUDA, when the runtime
        deems fp16 safe) or fp32 otherwise.
    """
    data = comfy.utils.load_torch_file(ckpt_path, safe_load=True)
    model = gligen.load_gligen(data)

    # Move the model onto the active compute device before picking a dtype.
    device = model_management.get_torch_device()
    model = model.to(device)

    # fp16 only on CUDA and only when model_management says it is safe;
    # everything else (CPU, MPS, fp16-unsafe GPUs) stays in fp32.
    if device.type == "cuda" and model_management.should_use_fp16():
        model = model.half()
    else:
        model = model.float()

    # BUG FIX: ComfyUI's ModelPatcher.__init__ takes a required
    # offload_device parameter; calling it with load_device alone raises
    # TypeError at runtime (matching the "Runtime error" status on this
    # Space). NOTE(review): confirm against this repo's
    # comfy/model_patcher.py signature.
    return comfy.model_patcher.ModelPatcher(
        model,
        load_device=device,
        offload_device=model_management.unet_offload_device(),
    )
| 479 |
def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
|
| 480 |
logging.warning("Warning: The load checkpoint with config function is deprecated and will eventually be removed, please use the other one.")
|
| 481 |
model, clip, vae, _ = load_checkpoint_guess_config(ckpt_path, output_vae=output_vae, output_clip=output_clip, output_clipvision=False, embedding_directory=embedding_directory, output_model=True)
|