sayshara committed on
Commit
f1659ba
·
1 Parent(s): 6b5f466

Optimizing

Browse files
Files changed (3) hide show
  1. app.py +4 -0
  2. diffqrcoder_wrapper.py +9 -4
  3. requirements.txt +2 -1
app.py CHANGED
@@ -16,6 +16,10 @@ def warmup():
16
  Run once on Space startup, on CPU only.
17
  Downloads models & builds pipeline into cache.
18
  """
 
 
 
 
19
  print("🔥 Warmup starting: downloading models & building pipeline on CPU...")
20
  pipe = load_pipeline()
21
  print("🔥 Warmup done. Pipeline ready on CPU.")
 
16
  Run once on Space startup, on CPU only.
17
  Downloads models & builds pipeline into cache.
18
  """
19
+ torch.backends.cuda.matmul.allow_tf32 = True
20
+ torch.backends.cudnn.allow_tf32 = True
21
+ torch.set_float32_matmul_precision("high")
22
+
23
  print("🔥 Warmup starting: downloading models & building pipeline on CPU...")
24
  pipe = load_pipeline()
25
  print("🔥 Warmup done. Pipeline ready on CPU.")
diffqrcoder_wrapper.py CHANGED
@@ -1,6 +1,6 @@
1
  # diffqrcoder_wrapper.py
2
  import torch
3
- from diffusers import ControlNetModel, DDIMScheduler
4
  from PIL import Image
5
  import qrcode
6
  from huggingface_hub import hf_hub_download
@@ -71,12 +71,17 @@ def load_pipeline():
71
  use_auth_token=True, # uses the Space's HF token
72
  )
73
 
74
- pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
75
 
76
  # Memory helpers – cheaper attention
77
  try:
78
- pipe.enable_attention_slicing()
79
- # Optional: pipe.enable_xformers_memory_efficient_attention()
 
 
 
 
 
80
  except Exception as e:
81
  print("⚠️ Could not enable attention optimizations:", repr(e))
82
 
 
1
  # diffqrcoder_wrapper.py
2
  import torch
3
+ from diffusers import ControlNetModel, DPMSolverMultistepScheduler
4
  from PIL import Image
5
  import qrcode
6
  from huggingface_hub import hf_hub_download
 
71
  use_auth_token=True, # uses the Space's HF token
72
  )
73
 
74
+ pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
75
 
76
  # Memory helpers – cheaper attention
77
  try:
78
+ # pipe.enable_attention_slicing()
79
+ try:
80
+ pipe.enable_xformers_memory_efficient_attention()
81
+ print("✅ xFormers attention enabled.")
82
+ except Exception as e:
83
+ print("⚠️ xFormers not available:", repr(e))
84
+
85
  except Exception as e:
86
  print("⚠️ Could not enable attention optimizations:", repr(e))
87
 
requirements.txt CHANGED
@@ -12,4 +12,5 @@ opencv-python==4.11.0.86
12
  accelerate==1.3.0
13
  safetensors
14
  Pillow
15
- qrcode
 
 
12
  accelerate==1.3.0
13
  safetensors
14
  Pillow
15
+ qrcode
16
+ xformers