bulatko committed on
Commit
744e084
·
1 Parent(s): 8ff8722

Deploy Zoo3D with Zero GPU support

Browse files
Files changed (3) hide show
  1. check_zerogpu_ready.sh +115 -0
  2. mvp.py +52 -9
  3. requirements.txt +2 -3
check_zerogpu_ready.sh ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ echo "🔍 Zero GPU Deployment Readiness Check"
4
+ echo "======================================"
5
+
6
+ # Color codes
7
+ RED='\033[0;31m'
8
+ GREEN='\033[0;32m'
9
+ YELLOW='\033[1;33m'
10
+ NC='\033[0m' # No Color
11
+
12
+ # Check if torch is in requirements.txt
13
+ echo -n "Checking requirements.txt for torch... "
14
+ if grep -q "^torch==" requirements.txt 2>/dev/null; then
15
+ echo -e "${RED}❌ FAIL${NC}"
16
+ echo " ERROR: torch found in requirements.txt - MUST be removed!"
17
+ exit 1
18
+ else
19
+ echo -e "${GREEN}✅ PASS${NC}"
20
+ fi
21
+
22
+ # Check if torchvision is in requirements.txt
23
+ echo -n "Checking requirements.txt for torchvision... "
24
+ if grep -q "^torchvision==" requirements.txt 2>/dev/null; then
25
+ echo -e "${RED}❌ FAIL${NC}"
26
+ echo " ERROR: torchvision found in requirements.txt - MUST be removed!"
27
+ exit 1
28
+ else
29
+ echo -e "${GREEN}✅ PASS${NC}"
30
+ fi
31
+
32
+ # Check if README_HF.md exists
33
+ echo -n "Checking for README_HF.md... "
34
+ if [ -f "README_HF.md" ]; then
35
+ echo -e "${GREEN}✅ EXISTS${NC}"
36
+
37
+ # Check for python_version in README
38
+ echo -n "Checking python_version in README_HF.md... "
39
+ if grep -q "python_version:" README_HF.md; then
40
+ echo -e "${GREEN}✅ FOUND${NC}"
41
+ else
42
+ echo -e "${YELLOW}⚠️ MISSING${NC}"
43
+ echo " WARNING: python_version not specified in README_HF.md"
44
+ fi
45
+ else
46
+ echo -e "${RED}❌ MISSING${NC}"
47
+ echo " ERROR: README_HF.md not found!"
48
+ exit 1
49
+ fi
50
+
51
+ # Check if packages.txt exists
52
+ echo -n "Checking for packages.txt... "
53
+ if [ -f "packages.txt" ]; then
54
+ echo -e "${GREEN}✅ EXISTS${NC}"
55
+ else
56
+ echo -e "${YELLOW}⚠️ MISSING${NC}"
57
+ echo " WARNING: packages.txt not found (may be needed for system dependencies)"
58
+ fi
59
+
60
+ # Check if app.py exists
61
+ echo -n "Checking for app.py... "
62
+ if [ -f "app.py" ]; then
63
+ echo -e "${GREEN}✅ EXISTS${NC}"
64
+ else
65
+ echo -e "${RED}❌ MISSING${NC}"
66
+ echo " ERROR: app.py not found!"
67
+ exit 1
68
+ fi
69
+
70
+ # Check if mvp.py exists
71
+ echo -n "Checking for mvp.py... "
72
+ if [ -f "mvp.py" ]; then
73
+ echo -e "${GREEN}✅ EXISTS${NC}"
74
+
75
+ # Check for spaces.GPU decorators
76
+ echo -n "Checking for @spaces.GPU decorators... "
77
+ if grep -q "@spaces.GPU" mvp.py; then
78
+ count=$(grep -c "@spaces.GPU" mvp.py)
79
+ echo -e "${GREEN}✅ FOUND ($count decorators)${NC}"
80
+ else
81
+ echo -e "${YELLOW}⚠️ NOT FOUND${NC}"
82
+ echo " WARNING: No @spaces.GPU decorators found"
83
+ fi
84
+ else
85
+ echo -e "${RED}❌ MISSING${NC}"
86
+ echo " ERROR: mvp.py not found!"
87
+ exit 1
88
+ fi
89
+
90
+ # Check gradio version
91
+ echo -n "Checking Gradio version... "
92
+ if grep -q "gradio==" requirements.txt; then
93
+ version=$(grep "gradio==" requirements.txt | cut -d'=' -f3)
94
+ major=$(echo $version | cut -d'.' -f1)
95
+ if [ "$major" -ge "4" ]; then
96
+ echo -e "${GREEN}✅ $version (>=4.0)${NC}"
97
+ else
98
+ echo -e "${RED}❌ $version (<4.0)${NC}"
99
+ echo " ERROR: Gradio version must be 4.0 or higher"
100
+ fi
101
+ else
102
+ echo -e "${YELLOW}⚠️ NOT SPECIFIED${NC}"
103
+ fi
104
+
105
+ echo ""
106
+ echo "======================================"
107
+ echo -e "${GREEN}✅ All critical checks passed!${NC}"
108
+ echo ""
109
+ echo "Next steps:"
110
+ echo "1. Copy files to your HF Space repository"
111
+ echo "2. Use README_HF.md as your README.md"
112
+ echo "3. Push to Hugging Face"
113
+ echo "4. Enable Zero GPU in Space settings"
114
+ echo ""
115
+ echo "For detailed instructions, see ZERO_GPU_FINAL_INSTRUCTIONS.md"
mvp.py CHANGED
@@ -6,8 +6,28 @@
6
 
7
  import os
8
  import cv2
9
- import torch
10
  import numpy as np
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  import gradio as gr
12
  import sys
13
  import shutil
@@ -86,14 +106,29 @@ cropformer_runner.make_cropformer_dir(MK_PATH)
86
  WORK_DIR = os.environ.get("ZOO3D_WORKDIR", os.path.join(tempfile.gettempdir(), "zoo3d"))
87
  os.makedirs(WORK_DIR, exist_ok=True)
88
  from visual_util import predictions_to_glb
89
- from vggt.models.vggt import VGGT
90
- from vggt.utils.load_fn import load_and_preprocess_images
91
- from vggt.utils.pose_enc import pose_encoding_to_extri_intri
92
- from vggt.utils.geometry import unproject_depth_map_to_point_map
93
 
94
- device = "cuda" if torch.cuda.is_available() else 'cpu'
95
-
96
- print(f"Using device: {device}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
 
98
  # CPU debug / compatibility knobs:
99
  # - On CPU, VGGT-1B inference is usually impractical. For debugging, we fall back to a lightweight
@@ -123,6 +158,10 @@ class ModelManager:
123
  def get_vggt_model(self):
124
  """Load VGGT model only when needed"""
125
  if self._vggt_model is None:
 
 
 
 
126
  if not torch.cuda.is_available():
127
  if not ZOO3D_ALLOW_CPU:
128
  raise RuntimeError("CUDA недоступна. Для этого Space нужен GPU (CUDA).")
@@ -149,6 +188,10 @@ class ModelManager:
149
  def get_metric3d_model(self):
150
  """Load Metric3D model only when needed"""
151
  if self._metric3d_model is None:
 
 
 
 
152
  if not torch.cuda.is_available():
153
  return None
154
 
@@ -187,7 +230,7 @@ class ModelManager:
187
 
188
  def clear_cache(self):
189
  """Clear GPU cache after processing"""
190
- if torch.cuda.is_available():
191
  torch.cuda.empty_cache()
192
  gc.collect()
193
 
 
6
 
7
  import os
8
  import cv2
 
9
  import numpy as np
10
+
11
# Import torch - will be provided by Zero GPU environment
try:
    import torch
    TORCH_AVAILABLE = True
except ImportError:
    print("⚠️ PyTorch not found - will be installed by Zero GPU environment")
    TORCH_AVAILABLE = False

# For local testing without torch: a minimal stand-in exposing only the
# torch attributes this module touches (cuda queries, no_grad context).
class DummyTorch:
    # NOTE: the callables must be wrapped in staticmethod().  Plain lambdas
    # stored in a class dict are bound as instance methods on attribute
    # access, so a zero-arg lambda would receive an implicit `self` and
    # torch.cuda.is_available() would raise TypeError.
    cuda = type('cuda', (), {
        'is_available': staticmethod(lambda: False),
        'empty_cache': staticmethod(lambda: None),
        'get_device_capability': staticmethod(lambda: (0, 0)),
        'get_device_name': staticmethod(lambda: 'No GPU'),
    })()

    def no_grad(self):
        # Usable as `with torch.no_grad():` — the instance acts as its own
        # no-op context manager via __enter__/__exit__ below.
        return self

    def __enter__(self):
        return self

    def __exit__(self, *args):
        pass

if not TORCH_AVAILABLE:
    torch = DummyTorch()
31
  import gradio as gr
32
  import sys
33
  import shutil
 
106
  WORK_DIR = os.environ.get("ZOO3D_WORKDIR", os.path.join(tempfile.gettempdir(), "zoo3d"))
107
  os.makedirs(WORK_DIR, exist_ok=True)
108
  from visual_util import predictions_to_glb
 
 
 
 
109
 
110
# Pull in the VGGT stack only when a working torch install exists; otherwise
# record that the 3D-reconstruction path is unavailable so callers can bail.
if TORCH_AVAILABLE:
    try:
        from vggt.models.vggt import VGGT
        from vggt.utils.load_fn import load_and_preprocess_images
        from vggt.utils.pose_enc import pose_encoding_to_extri_intri
        from vggt.utils.geometry import unproject_depth_map_to_point_map
        VGGT_AVAILABLE = True
    except ImportError as e:
        print(f"⚠️ VGGT modules not available: {e}")
        VGGT_AVAILABLE = False
else:
    VGGT_AVAILABLE = False
    print("⚠️ Skipping VGGT imports (PyTorch not available)")

# Pick the compute device: CUDA when torch reports one, CPU in every
# other case (including the dummy-torch fallback).
device = ("cuda" if torch.cuda.is_available() else 'cpu') if TORCH_AVAILABLE else "cpu"

print(f"Using device: {device} (PyTorch available: {TORCH_AVAILABLE})")
132
 
133
  # CPU debug / compatibility knobs:
134
  # - On CPU, VGGT-1B inference is usually impractical. For debugging, we fall back to a lightweight
 
158
  def get_vggt_model(self):
159
  """Load VGGT model only when needed"""
160
  if self._vggt_model is None:
161
+ if not TORCH_AVAILABLE or not VGGT_AVAILABLE:
162
+ print("⚠️ VGGT model cannot be loaded (PyTorch/VGGT not available)")
163
+ return None
164
+
165
  if not torch.cuda.is_available():
166
  if not ZOO3D_ALLOW_CPU:
167
  raise RuntimeError("CUDA недоступна. Для этого Space нужен GPU (CUDA).")
 
188
  def get_metric3d_model(self):
189
  """Load Metric3D model only when needed"""
190
  if self._metric3d_model is None:
191
+ if not TORCH_AVAILABLE:
192
+ print("⚠️ Metric3D model cannot be loaded (PyTorch not available)")
193
+ return None
194
+
195
  if not torch.cuda.is_available():
196
  return None
197
 
 
230
 
231
  def clear_cache(self):
232
  """Clear GPU cache after processing"""
233
+ if TORCH_AVAILABLE and torch.cuda.is_available():
234
  torch.cuda.empty_cache()
235
  gc.collect()
236
 
requirements.txt CHANGED
@@ -1,6 +1,5 @@
1
- # PyTorch - using version compatible with Zero GPU
2
- torch==2.2.0
3
- torchvision==0.17.0
4
  numpy==1.26.1
5
  Pillow
6
  huggingface_hub
 
1
+ # PyTorch and torchvision are automatically installed by Zero GPU
2
+ # DO NOT specify torch/torchvision here - it will cause conflicts!
 
3
  numpy==1.26.1
4
  Pillow
5
  huggingface_hub