Spaces:
Sleeping
Sleeping
Zhen Ye committed on
Commit ·
e481415
1
Parent(s): f344561
Respect CUDA_VISIBLE_DEVICES env var (do not delete it)
Browse files
- inference.py +10 -8
inference.py
CHANGED
|
@@ -1,8 +1,9 @@
|
|
| 1 |
# CRITICAL: Clear CUDA_VISIBLE_DEVICES BEFORE any imports
|
| 2 |
# HF Spaces may set this to "0" dynamically, locking us to a single GPU
|
|
|
|
|
|
|
|
|
|
| 3 |
import os
|
| 4 |
-
if "CUDA_VISIBLE_DEVICES" in os.environ:
|
| 5 |
-
del os.environ["CUDA_VISIBLE_DEVICES"]
|
| 6 |
|
| 7 |
import logging
|
| 8 |
import time
|
|
@@ -1453,12 +1454,13 @@ def run_segmentation(
|
|
| 1453 |
logging.info(f"[DEBUG] Segmentation PID: {os.getpid()}")
|
| 1454 |
logging.info(f"[DEBUG] CUDA_VISIBLE_DEVICES before clear: {os.environ.get('CUDA_VISIBLE_DEVICES')}")
|
| 1455 |
|
| 1456 |
-
if "CUDA_VISIBLE_DEVICES" in os.environ:
|
| 1457 |
-
logging.info("[DEBUG] Deleting CUDA_VISIBLE_DEVICES from env (segmentation)")
|
| 1458 |
-
del os.environ["CUDA_VISIBLE_DEVICES"]
|
|
|
|
| 1459 |
|
| 1460 |
num_gpus = torch.cuda.device_count()
|
| 1461 |
-
logging.info(f"[DEBUG] num_gpus: {num_gpus}")
|
| 1462 |
segmenters = []
|
| 1463 |
|
| 1464 |
if num_gpus > 0:
|
|
@@ -1680,8 +1682,8 @@ def run_depth_inference(
|
|
| 1680 |
num_gpus = torch.cuda.device_count()
|
| 1681 |
estimators = []
|
| 1682 |
|
| 1683 |
-
if "CUDA_VISIBLE_DEVICES" in os.environ:
|
| 1684 |
-
del os.environ["CUDA_VISIBLE_DEVICES"]
| 1685 |
|
| 1686 |
if num_gpus > 0:
|
| 1687 |
logging.info("Detected %d GPUs. Loading depth estimators...", num_gpus)
|
|
|
|
| 1 |
# CRITICAL: Clear CUDA_VISIBLE_DEVICES BEFORE any imports
|
| 2 |
# HF Spaces may set this to "0" dynamically, locking us to a single GPU
|
| 3 |
+
# import os
|
| 4 |
+
# if "CUDA_VISIBLE_DEVICES" in os.environ:
|
| 5 |
+
# del os.environ["CUDA_VISIBLE_DEVICES"]
|
| 6 |
import os
|
|
|
|
|
|
|
| 7 |
|
| 8 |
import logging
|
| 9 |
import time
|
|
|
|
| 1454 |
logging.info(f"[DEBUG] Segmentation PID: {os.getpid()}")
|
| 1455 |
logging.info(f"[DEBUG] CUDA_VISIBLE_DEVICES before clear: {os.environ.get('CUDA_VISIBLE_DEVICES')}")
|
| 1456 |
|
| 1457 |
+
|
| 1458 |
+
# if "CUDA_VISIBLE_DEVICES" in os.environ:
|
| 1459 |
+
# logging.info("[DEBUG] Deleting CUDA_VISIBLE_DEVICES from env (segmentation)")
|
| 1460 |
+
# del os.environ["CUDA_VISIBLE_DEVICES"]
|
| 1461 |
|
| 1462 |
num_gpus = torch.cuda.device_count()
|
| 1463 |
+
logging.info(f"[DEBUG] num_gpus: {num_gpus}")
|
| 1464 |
segmenters = []
|
| 1465 |
|
| 1466 |
if num_gpus > 0:
|
|
|
|
| 1682 |
num_gpus = torch.cuda.device_count()
|
| 1683 |
estimators = []
|
| 1684 |
|
| 1685 |
+
# if "CUDA_VISIBLE_DEVICES" in os.environ:
|
| 1686 |
+
# del os.environ["CUDA_VISIBLE_DEVICES"]
|
| 1687 |
|
| 1688 |
if num_gpus > 0:
|
| 1689 |
logging.info("Detected %d GPUs. Loading depth estimators...", num_gpus)
|