Spaces:
Sleeping
fix: Import spaces before torch to prevent CUDA initialization error
Browse files

Problem:
- RuntimeError on ZeroGPU: 'CUDA has been initialized before importing spaces package'
- torch was being imported before spaces could configure ZeroGPU properly
Solution:
- Move spaces import to top of file (before torch, transformers)
- Set ZEROGPU_AVAILABLE at import time instead of after environment detection
- Use already-imported ZEROGPU_AVAILABLE in detect_hardware_environment()
- Remove duplicate ZEROGPU_AVAILABLE assignment
Changes:
- Lines 9-15: Import spaces first with try/except
- Line 84: Use ZEROGPU_AVAILABLE directly instead of re-importing spaces
- Line 138: Remove duplicate assignment, add explanatory comment
This follows ZeroGPU best practice: import spaces before any CUDA package
https://huggingface.co/docs/hub/spaces-zerogpu#basic-usage
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
|
@@ -5,6 +5,16 @@ Supports: Local (Mac/Linux/Windows), HF Spaces (CPU Basic/Upgrade, ZeroGPU)
|
|
| 5 |
|
| 6 |
import os
|
| 7 |
import platform
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
import gradio as gr
|
| 9 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 10 |
from huggingface_hub import snapshot_download
|
|
@@ -70,15 +80,14 @@ def detect_hardware_environment():
|
|
| 70 |
env_info['platform'] = 'hf_spaces'
|
| 71 |
space_id = os.environ.get('SPACE_ID', 'unknown')
|
| 72 |
|
| 73 |
-
# Check for ZeroGPU
|
| 74 |
-
|
| 75 |
-
import spaces
|
| 76 |
env_info['hardware'] = 'zerogpu'
|
| 77 |
env_info['gpu_available'] = True
|
| 78 |
env_info['gpu_name'] = 'NVIDIA H200 (ZeroGPU)'
|
| 79 |
env_info['description'] = f"🚀 HF Spaces - ZeroGPU ({space_id})"
|
| 80 |
env_info['cuda_compatible'] = True
|
| 81 |
-
|
| 82 |
# Check CPU tier by memory/CPU count
|
| 83 |
cpu_count = env_info['cpu_count']
|
| 84 |
if cpu_count >= 8:
|
|
@@ -126,7 +135,7 @@ def detect_hardware_environment():
|
|
| 126 |
|
| 127 |
# Detect hardware environment
|
| 128 |
HW_ENV = detect_hardware_environment()
|
| 129 |
-
ZEROGPU_AVAILABLE
|
| 130 |
|
| 131 |
# Print environment info
|
| 132 |
print("=" * 60)
|
|
|
|
| 5 |
|
| 6 |
import os
|
| 7 |
import platform
|
| 8 |
+
|
| 9 |
+
# IMPORTANT: Import spaces FIRST before any CUDA-related packages (torch, transformers)
|
| 10 |
+
# This prevents "CUDA has been initialized" error on ZeroGPU
|
| 11 |
+
try:
|
| 12 |
+
import spaces
|
| 13 |
+
ZEROGPU_AVAILABLE = True
|
| 14 |
+
except ImportError:
|
| 15 |
+
ZEROGPU_AVAILABLE = False
|
| 16 |
+
|
| 17 |
+
# Now safe to import CUDA-related packages
|
| 18 |
import gradio as gr
|
| 19 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 20 |
from huggingface_hub import snapshot_download
|
|
|
|
| 80 |
env_info['platform'] = 'hf_spaces'
|
| 81 |
space_id = os.environ.get('SPACE_ID', 'unknown')
|
| 82 |
|
| 83 |
+
# Check for ZeroGPU using already-imported status
|
| 84 |
+
if ZEROGPU_AVAILABLE:
|
|
|
|
| 85 |
env_info['hardware'] = 'zerogpu'
|
| 86 |
env_info['gpu_available'] = True
|
| 87 |
env_info['gpu_name'] = 'NVIDIA H200 (ZeroGPU)'
|
| 88 |
env_info['description'] = f"🚀 HF Spaces - ZeroGPU ({space_id})"
|
| 89 |
env_info['cuda_compatible'] = True
|
| 90 |
+
else:
|
| 91 |
# Check CPU tier by memory/CPU count
|
| 92 |
cpu_count = env_info['cpu_count']
|
| 93 |
if cpu_count >= 8:
|
|
|
|
| 135 |
|
| 136 |
# Detect hardware environment
|
| 137 |
HW_ENV = detect_hardware_environment()
|
| 138 |
+
# Note: ZEROGPU_AVAILABLE already set at import time to prevent CUDA initialization errors
|
| 139 |
|
| 140 |
# Print environment info
|
| 141 |
print("=" * 60)
|