Olivia committed on
Commit
fac30cc
·
1 Parent(s): 4aca758

info endpoint

Browse files
Files changed (1) hide show
  1. app.py +64 -12
app.py CHANGED
@@ -66,8 +66,50 @@ except ImportError:
66
  # Configuration
67
  # ============================================================================
68
 
69
- DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
70
- print(f"Device: {DEVICE}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  if SPACES_AVAILABLE:
72
  print("ZeroGPU support enabled")
73
 
@@ -242,7 +284,7 @@ class VGGFeatureExtractor(nn.Module):
242
  _vgg_extractor = None
243
 
244
  def get_vgg_extractor():
245
- """Lazy load VGG feature extractor"""
246
  global _vgg_extractor
247
  if _vgg_extractor is None:
248
  _vgg_extractor = VGGFeatureExtractor().to(DEVICE)
@@ -528,16 +570,26 @@ def load_model(style: str, backend: str = 'auto') -> TransformerNet:
528
  print("=" * 50)
529
  print("StyleForge - Initializing...")
530
  print("=" * 50)
531
- print(f"Device: {DEVICE.type.upper()}")
 
 
 
 
532
  print(f"CUDA Kernels: {'Available' if CUDA_KERNELS_AVAILABLE else 'Not Available (will compile on first GPU task)'}")
533
- print("Preloading models...")
534
- for style in STYLES.keys():
535
- try:
536
- load_model(style, 'auto')
537
- print(f" {STYLES[style]}: Ready")
538
- except Exception as e:
539
- print(f" {STYLES[style]}: Failed - {e}")
540
- print("All models loaded!")
 
 
 
 
 
 
541
  print("=" * 50)
542
 
543
  # ============================================================================
 
66
  # Configuration
67
  # ============================================================================
68
 
69
# For ZeroGPU: Don't initialize CUDA at module level
# Device will be determined when needed within GPU tasks
# NOTE(review): assumes SPACES_AVAILABLE is set by the `spaces` import
# guard earlier in the file — confirm against the top-of-file try/except.
_SPACES_ZERO_GPU = SPACES_AVAILABLE # From spaces import above
72
+
73
+ # Create a device proxy that works like torch.device but lazy-loads on ZeroGPU
74
+ class _DeviceProxy:
75
+ """Proxy for torch.device that lazy-loads CUDA on ZeroGPU"""
76
+
77
+ def __init__(self):
78
+ self._device = None
79
+
80
+ @property
81
+ def type(self):
82
+ self._ensure_device()
83
+ return self._device.type
84
+
85
+ def __str__(self):
86
+ self._ensure_device()
87
+ return str(self._device)
88
+
89
+ def __repr__(self):
90
+ self._ensure_device()
91
+ return repr(self._device)
92
+
93
+ def _ensure_device(self):
94
+ """Lazy device initialization - only calls torch.cuda.is_available() when needed"""
95
+ if self._device is None:
96
+ if torch.cuda.is_available():
97
+ self._device = torch.device('cuda')
98
+ else:
99
+ self._device = torch.device('cpu')
100
+
101
+ def __eq__(self, other):
102
+ return str(self) == str(other)
103
+
104
+
105
+ DEVICE = _DeviceProxy()
106
+
107
if _SPACES_ZERO_GPU:
    # Do not touch DEVICE here: resolving it would probe CUDA in the
    # main process, which ZeroGPU forbids.
    # Fix: this was an f-string with no placeholders (ruff F541).
    print("Device: Will use CUDA within GPU tasks (ZeroGPU mode)")
else:
    # Only access device if not ZeroGPU to avoid CUDA init
    DEVICE._ensure_device()
    print(f"Device: {DEVICE}")
if SPACES_AVAILABLE:
    print("ZeroGPU support enabled")
115
 
 
284
  _vgg_extractor = None
285
 
286
  def get_vgg_extractor():
287
+ """Lazy load VGG feature extractor (with ZeroGPU support)"""
288
  global _vgg_extractor
289
  if _vgg_extractor is None:
290
  _vgg_extractor = VGGFeatureExtractor().to(DEVICE)
 
570
print("=" * 50)
print("StyleForge - Initializing...")
print("=" * 50)
if _SPACES_ZERO_GPU:
    # Avoid resolving DEVICE in the main process (ZeroGPU restriction).
    print("Device: CUDA (ZeroGPU mode - lazy initialization)")
else:
    DEVICE._ensure_device()
    print(f"Device: {DEVICE.type.upper()}")
print(f"CUDA Kernels: {'Available' if CUDA_KERNELS_AVAILABLE else 'Not Available (will compile on first GPU task)'}")

# Skip model preloading on ZeroGPU to avoid CUDA init in main process
if not _SPACES_ZERO_GPU:
    print("Preloading models...")
    # Iterate items() once instead of re-indexing STYLES[style] per branch.
    for style, label in STYLES.items():
        try:
            load_model(style, 'auto')
            print(f" {label}: Ready")
        except Exception as e:
            # Best-effort preload: report the failure and continue with
            # the remaining styles rather than aborting startup.
            print(f" {label}: Failed - {e}")
    print("All models loaded!")
else:
    print("ZeroGPU mode: Models will be loaded on-demand within GPU tasks")

print("=" * 50)
594
 
595
  # ============================================================================