Spaces:
Sleeping
Sleeping
Zhen Ye
committed on
Commit
·
a69731b
1
Parent(s):
61fbbca
Fix: GPU detection order and distance consistency
Browse files
- inference.py +8 -5
inference.py
CHANGED
|
@@ -455,14 +455,16 @@ def run_inference(
|
|
| 455 |
active_detector = detector_name or "hf_yolov8"
|
| 456 |
|
| 457 |
# 3. Parallel Model Loading
|
| 458 |
-
num_gpus = torch.cuda.device_count()
|
| 459 |
-
detectors = []
|
| 460 |
-
depth_estimators = []
|
| 461 |
|
| 462 |
# Clear CUDA_VISIBLE_DEVICES to ensure we see all GPUs if not already handled
|
|
|
|
| 463 |
if "CUDA_VISIBLE_DEVICES" in os.environ:
|
| 464 |
del os.environ["CUDA_VISIBLE_DEVICES"]
|
| 465 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 466 |
if num_gpus > 0:
|
| 467 |
logging.info("Detected %d GPUs. Loading models in parallel...", num_gpus)
|
| 468 |
|
|
@@ -671,12 +673,13 @@ def run_segmentation(
|
|
| 671 |
logging.info("Using segmenter: %s with queries: %s", active_segmenter, queries)
|
| 672 |
|
| 673 |
# 2. Load Segmenters (Parallel)
|
| 674 |
-
num_gpus = torch.cuda.device_count()
|
| 675 |
-
segmenters = []
|
| 676 |
|
| 677 |
if "CUDA_VISIBLE_DEVICES" in os.environ:
|
| 678 |
del os.environ["CUDA_VISIBLE_DEVICES"]
|
| 679 |
|
|
|
|
|
|
|
|
|
|
| 680 |
if num_gpus > 0:
|
| 681 |
logging.info("Detected %d GPUs. Loading segmenters...", num_gpus)
|
| 682 |
def load_seg(gpu_id: int):
|
|
|
|
| 455 |
active_detector = detector_name or "hf_yolov8"
|
| 456 |
|
| 457 |
# 3. Parallel Model Loading
|
|
|
|
|
|
|
|
|
|
| 458 |
|
| 459 |
# Clear CUDA_VISIBLE_DEVICES to ensure we see all GPUs if not already handled
|
| 460 |
+
# This must be done BEFORE any torch.cuda calls in this scope if the env was modified externally
|
| 461 |
if "CUDA_VISIBLE_DEVICES" in os.environ:
|
| 462 |
del os.environ["CUDA_VISIBLE_DEVICES"]
|
| 463 |
|
| 464 |
+
num_gpus = torch.cuda.device_count()
|
| 465 |
+
detectors = []
|
| 466 |
+
depth_estimators = []
|
| 467 |
+
|
| 468 |
if num_gpus > 0:
|
| 469 |
logging.info("Detected %d GPUs. Loading models in parallel...", num_gpus)
|
| 470 |
|
|
|
|
| 673 |
logging.info("Using segmenter: %s with queries: %s", active_segmenter, queries)
|
| 674 |
|
| 675 |
# 2. Load Segmenters (Parallel)
|
|
|
|
|
|
|
| 676 |
|
| 677 |
if "CUDA_VISIBLE_DEVICES" in os.environ:
|
| 678 |
del os.environ["CUDA_VISIBLE_DEVICES"]
|
| 679 |
|
| 680 |
+
num_gpus = torch.cuda.device_count()
|
| 681 |
+
segmenters = []
|
| 682 |
+
|
| 683 |
if num_gpus > 0:
|
| 684 |
logging.info("Detected %d GPUs. Loading segmenters...", num_gpus)
|
| 685 |
def load_seg(gpu_id: int):
|