Update app.py
app.py CHANGED
```diff
@@ -8,11 +8,12 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 
 if torch.cuda.is_available():
     torch.cuda.max_memory_allocated(device=device)
-    pipe = DiffusionPipeline.from_pretrained("
+    pipe = DiffusionPipeline.from_pretrained("Yntec/DreamPhotoGASM", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
+    pipe.load_lora_weights("ovieyra21/autotrain-begg7-ozit5")
     pipe.enable_xformers_memory_efficient_attention()
     pipe = pipe.to(device)
 else:
-    pipe = DiffusionPipeline.from_pretrained("
+    pipe = DiffusionPipeline.from_pretrained("Yntec/DreamPhotoGASM", use_safetensors=True)
     pipe = pipe.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
```
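For reference, a minimal self-contained sketch of the loading block as it stands after this commit. The removed lines are truncated in the diff viewer, so only the new state is reconstructed here; the imports and the Gradio UI around this block are assumed from the rest of app.py and are not part of this diff.

```python
# Sketch of app.py's model-loading block after this change (assumed context:
# the Gradio UI and inference function elsewhere in app.py are not shown).
import numpy as np
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

if torch.cuda.is_available():
    # Reports peak allocated GPU memory; has no side effect on loading.
    torch.cuda.max_memory_allocated(device=device)
    # Load the fp16 variant of the base checkpoint for GPU inference.
    pipe = DiffusionPipeline.from_pretrained(
        "Yntec/DreamPhotoGASM",
        torch_dtype=torch.float16,
        variant="fp16",
        use_safetensors=True,
    )
    # Apply the autotrain LoRA weights on top of the base pipeline.
    pipe.load_lora_weights("ovieyra21/autotrain-begg7-ozit5")
    pipe.enable_xformers_memory_efficient_attention()
    pipe = pipe.to(device)
else:
    # CPU fallback: full-precision weights, no xformers attention.
    pipe = DiffusionPipeline.from_pretrained(
        "Yntec/DreamPhotoGASM", use_safetensors=True
    )
    pipe = pipe.to(device)

MAX_SEED = np.iinfo(np.int32).max
```

A typical call site would then draw a seed in `[0, MAX_SEED]`, build a `torch.Generator(device).manual_seed(seed)`, and run `pipe(prompt=..., generator=generator).images[0]`, as in a standard Diffusers-based Space.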