meaculpitt committed on
Commit
fd6c712
·
verified ·
1 Parent(s): eb01bf0

scorevision: push artifact

Browse files
Files changed (2) hide show
  1. chute_config.yml +2 -5
  2. miner.py +1 -90
chute_config.yml CHANGED
@@ -3,16 +3,13 @@ Image:
3
  run_command:
4
  - pip install --upgrade setuptools wheel
5
  - pip install 'numpy>=1.23' 'onnxruntime-gpu>=1.16' 'nvidia-cudnn-cu12' 'nvidia-cublas-cu12'
6
- 'nvidia-cuda-runtime-cu12' 'nvidia-cufft-cu12' 'nvidia-curand-cu12'
7
- 'nvidia-cusolver-cu12' 'nvidia-cusparse-cu12' 'nvidia-nvjitlink-cu12'
8
- 'tensorrt>=10.0' 'opencv-python-headless>=4.7' 'pillow>=9.5' 'huggingface_hub>=0.19.4'
9
- 'pydantic>=2.0' 'pyyaml>=6.0' 'aiohttp>=3.9' 'ensemble-boxes>=1.0' 'torch>=2.6,<3.0'
10
  NodeSelector:
11
  gpu_count: 1
12
  min_vram_gb_per_gpu: 16
13
  max_hourly_price_per_gpu: 2.0
14
  exclude:
15
- - '3090'
16
  - '5090'
17
  - b200
18
  - h200
 
3
  run_command:
4
  - pip install --upgrade setuptools wheel
5
  - pip install 'numpy>=1.23' 'onnxruntime-gpu>=1.16' 'nvidia-cudnn-cu12' 'nvidia-cublas-cu12'
6
+ 'opencv-python-headless>=4.7' 'pillow>=9.5' 'huggingface_hub>=0.19.4' 'pydantic>=2.0'
7
+ 'pyyaml>=6.0' 'aiohttp>=3.9' 'ensemble-boxes>=1.0' 'torch>=2.6,<3.0'
 
 
8
  NodeSelector:
9
  gpu_count: 1
10
  min_vram_gb_per_gpu: 16
11
  max_hourly_price_per_gpu: 2.0
12
  exclude:
 
13
  - '5090'
14
  - b200
15
  - h200
miner.py CHANGED
@@ -92,95 +92,6 @@ def _preload_cuda_libs():
92
 
93
  _preload_cuda_libs()
94
 
95
- import subprocess as _subprocess
96
- import sys as _sys
97
-
98
- def _try_gpu_ort():
99
- """Attempt runtime install of onnxruntime-gpu for CUDA inference."""
100
- import time as _t
101
- _t0 = _t.time()
102
-
103
- try:
104
- import torch
105
- print(f"[GPU_SETUP] torch={torch.__version__} cuda={torch.cuda.is_available()} "
106
- f"devices={torch.cuda.device_count()}", flush=True)
107
- if torch.cuda.is_available():
108
- print(f"[GPU_SETUP] GPU: {torch.cuda.get_device_name(0)}", flush=True)
109
- except Exception as e:
110
- print(f"[GPU_SETUP] torch check failed: {e}", flush=True)
111
-
112
- try:
113
- import onnxruntime as _ort
114
- providers = _ort.get_available_providers()
115
- print(f"[GPU_SETUP] ORT={_ort.__version__} providers={providers}", flush=True)
116
- if 'CUDAExecutionProvider' in providers:
117
- print("[GPU_SETUP] CUDAExecutionProvider already available!", flush=True)
118
- return
119
- except ImportError:
120
- print("[GPU_SETUP] onnxruntime not importable", flush=True)
121
- return
122
-
123
- result = _subprocess.run(
124
- [_sys.executable, '-m', 'pip', 'list', '--format=columns'],
125
- capture_output=True, text=True, timeout=15
126
- )
127
- for pkg in ['onnxruntime', 'torch', 'nvidia-cu', 'ultralytics']:
128
- for line in result.stdout.splitlines():
129
- if pkg.lower() in line.lower():
130
- print(f"[GPU_SETUP] pkg: {line.strip()}", flush=True)
131
-
132
- print("[GPU_SETUP] Attempting onnxruntime-gpu install...", flush=True)
133
- try:
134
- r1 = _subprocess.run(
135
- [_sys.executable, '-m', 'pip', 'uninstall', 'onnxruntime', '-y'],
136
- capture_output=True, text=True, timeout=30
137
- )
138
- print(f"[GPU_SETUP] uninstall rc={r1.returncode}", flush=True)
139
-
140
- r2 = _subprocess.run(
141
- [_sys.executable, '-m', 'pip', 'install', 'onnxruntime-gpu', '--no-deps'],
142
- capture_output=True, text=True, timeout=180
143
- )
144
- print(f"[GPU_SETUP] install rc={r2.returncode}", flush=True)
145
- if r2.stdout:
146
- for line in r2.stdout.strip().splitlines()[-3:]:
147
- print(f"[GPU_SETUP] stdout: {line}", flush=True)
148
- if r2.stderr:
149
- for line in r2.stderr.strip().splitlines()[-3:]:
150
- print(f"[GPU_SETUP] stderr: {line}", flush=True)
151
-
152
- if r2.returncode != 0:
153
- print("[GPU_SETUP] FAILED — reinstalling CPU onnxruntime", flush=True)
154
- _subprocess.run(
155
- [_sys.executable, '-m', 'pip', 'install', 'onnxruntime', '--no-deps'],
156
- capture_output=True, timeout=60
157
- )
158
- return
159
-
160
- for key in list(_sys.modules.keys()):
161
- if 'onnxruntime' in key:
162
- del _sys.modules[key]
163
-
164
- import onnxruntime as _ort2
165
- new_providers = _ort2.get_available_providers()
166
- _dt = _t.time() - _t0
167
- print(f"[GPU_SETUP] SUCCESS: ORT={_ort2.__version__} providers={new_providers} ({_dt:.1f}s)", flush=True)
168
-
169
- except Exception as e:
170
- print(f"[GPU_SETUP] EXCEPTION: {e}", flush=True)
171
- try:
172
- for key in list(_sys.modules.keys()):
173
- if 'onnxruntime' in key:
174
- del _sys.modules[key]
175
- _subprocess.run(
176
- [_sys.executable, '-m', 'pip', 'install', 'onnxruntime', '--no-deps'],
177
- capture_output=True, timeout=60
178
- )
179
- print("[GPU_SETUP] Restored CPU onnxruntime", flush=True)
180
- except Exception:
181
- pass
182
-
183
- _try_gpu_ort()
184
 
185
 
186
  from pathlib import Path
@@ -676,7 +587,7 @@ class Miner:
676
 
677
  def __repr__(self) -> str:
678
  trt_status = "TRT" if self._trt_ready else "CUDA (TRT building)"
679
- return f"Unified Miner v3.15 — person={trt_status}, background TRT engine build"
680
 
681
  # ── Vehicle preprocessing (letterbox) ───────────────────────────────────
682
 
 
92
 
93
  _preload_cuda_libs()
94
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
 
96
 
97
  from pathlib import Path
 
587
 
588
  def __repr__(self) -> str:
589
  trt_status = "TRT" if self._trt_ready else "CUDA (TRT building)"
590
+ return f"Unified Miner v3.16 — person={trt_status}, background TRT engine build"
591
 
592
  # ── Vehicle preprocessing (letterbox) ───────────────────────────────────
593