Update to latent-gcode diffusion model with SD 1.5
Browse files
- app.py +3 -1
- requirements.txt +1 -0
app.py
CHANGED
@@ -24,10 +24,12 @@ def get_generator():
     dtype = torch.float16 if device == "cuda" else torch.float32

     print("Loading Stable Diffusion pipeline...")
+    # Use SD 1.5 which is more reliably available
     pipe = StableDiffusionPipeline.from_pretrained(
-        "…",                                  (removed model id — truncated to a lone `"` in this capture; original value not recoverable)
+        "runwayml/stable-diffusion-v1-5",
         torch_dtype=dtype,
         safety_checker=None,
+        use_safetensors=True,
     ).to(device)

     print("Loading gcode decoder...")
requirements.txt
CHANGED
@@ -4,3 +4,4 @@ transformers>=4.36
 diffusers>=0.25
 accelerate>=0.25
 huggingface_hub>=0.20
+safetensors>=0.4