Spaces:
Running
on
Zero
Running
on
Zero
Optimizing
Browse files- app.py +4 -0
- diffqrcoder_wrapper.py +9 -4
- requirements.txt +2 -1
app.py
CHANGED
|
@@ -16,6 +16,10 @@ def warmup():
|
|
| 16 |
Run once on Space startup, on CPU only.
|
| 17 |
Downloads models & builds pipeline into cache.
|
| 18 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
print("🔥 Warmup starting: downloading models & building pipeline on CPU...")
|
| 20 |
pipe = load_pipeline()
|
| 21 |
print("🔥 Warmup done. Pipeline ready on CPU.")
|
|
|
|
| 16 |
Run once on Space startup, on CPU only.
|
| 17 |
Downloads models & builds pipeline into cache.
|
| 18 |
"""
|
| 19 |
+
torch.backends.cuda.matmul.allow_tf32 = True
|
| 20 |
+
torch.backends.cudnn.allow_tf32 = True
|
| 21 |
+
torch.set_float32_matmul_precision("high")
|
| 22 |
+
|
| 23 |
print("🔥 Warmup starting: downloading models & building pipeline on CPU...")
|
| 24 |
pipe = load_pipeline()
|
| 25 |
print("🔥 Warmup done. Pipeline ready on CPU.")
|
diffqrcoder_wrapper.py
CHANGED
|
@@ -1,6 +1,6 @@
|
|
| 1 |
# diffqrcoder_wrapper.py
|
| 2 |
import torch
|
| 3 |
-
from diffusers import ControlNetModel,
|
| 4 |
from PIL import Image
|
| 5 |
import qrcode
|
| 6 |
from huggingface_hub import hf_hub_download
|
|
@@ -71,12 +71,17 @@ def load_pipeline():
|
|
| 71 |
use_auth_token=True, # uses the Space's HF token
|
| 72 |
)
|
| 73 |
|
| 74 |
-
pipe.scheduler =
|
| 75 |
|
| 76 |
# Memory helpers — cheaper attention
|
| 77 |
try:
|
| 78 |
-
pipe.enable_attention_slicing()
|
| 79 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 80 |
except Exception as e:
|
| 81 |
print("⚠️ Could not enable attention optimizations:", repr(e))
|
| 82 |
|
|
|
|
| 1 |
# diffqrcoder_wrapper.py
|
| 2 |
import torch
|
| 3 |
+
from diffusers import ControlNetModel, DPMSolverMultistepScheduler
|
| 4 |
from PIL import Image
|
| 5 |
import qrcode
|
| 6 |
from huggingface_hub import hf_hub_download
|
|
|
|
| 71 |
use_auth_token=True, # uses the Space's HF token
|
| 72 |
)
|
| 73 |
|
| 74 |
+
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
|
| 75 |
|
| 76 |
# Memory helpers — cheaper attention
|
| 77 |
try:
|
| 78 |
+
# pipe.enable_attention_slicing()
|
| 79 |
+
try:
|
| 80 |
+
pipe.enable_xformers_memory_efficient_attention()
|
| 81 |
+
print("✅ xFormers attention enabled.")
|
| 82 |
+
except Exception as e:
|
| 83 |
+
print("⚠️ xFormers not available:", repr(e))
|
| 84 |
+
|
| 85 |
except Exception as e:
|
| 86 |
print("⚠️ Could not enable attention optimizations:", repr(e))
|
| 87 |
|
requirements.txt
CHANGED
|
@@ -12,4 +12,5 @@ opencv-python==4.11.0.86
|
|
| 12 |
accelerate==1.3.0
|
| 13 |
safetensors
|
| 14 |
Pillow
|
| 15 |
-
qrcode
|
|
|
|
|
|
| 12 |
accelerate==1.3.0
|
| 13 |
safetensors
|
| 14 |
Pillow
|
| 15 |
+
qrcode
|
| 16 |
+
xformers
|