Spaces:
Sleeping
Sleeping
Zhen Ye
committed on
Commit
·
be0e60f
1
Parent(s):
b5e6df2
Fix GPU visibility, update depth scale, and improve radar display timing
Browse files- LaserPerception/LaserPerception.js +6 -2
- app.py +2 -2
- inference.py +6 -1
LaserPerception/LaserPerception.js
CHANGED
|
@@ -5,7 +5,7 @@
|
|
| 5 |
- Tab 3: trade-space console
|
| 6 |
========================= */
|
| 7 |
|
| 8 |
-
|
| 9 |
const API_CONFIG = window.API_CONFIG || {};
|
| 10 |
const BACKEND_BASE = (() => {
|
| 11 |
const raw = (API_CONFIG.BACKEND_BASE || API_CONFIG.BASE_URL || "").trim();
|
|
@@ -1039,7 +1039,7 @@
|
|
| 1039 |
clearInterval(state.hf.asyncPollInterval);
|
| 1040 |
// Clear job ID to prevent cancel attempts after completion
|
| 1041 |
state.hf.asyncJobId = null;
|
| 1042 |
-
|
| 1043 |
resolve();
|
| 1044 |
} catch (err) {
|
| 1045 |
if (err && err.code === "VIDEO_PENDING") {
|
|
@@ -2028,6 +2028,10 @@
|
|
| 2028 |
setStatus("good", "READY · Reason complete (you can Engage)");
|
| 2029 |
log("Reason complete.", "g");
|
| 2030 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2031 |
// Generate intel summary (async)
|
| 2032 |
computeIntelSummary();
|
| 2033 |
} catch (err) {
|
|
|
|
| 5 |
- Tab 3: trade-space console
|
| 6 |
========================= */
|
| 7 |
|
| 8 |
+
(() => {
|
| 9 |
const API_CONFIG = window.API_CONFIG || {};
|
| 10 |
const BACKEND_BASE = (() => {
|
| 11 |
const raw = (API_CONFIG.BACKEND_BASE || API_CONFIG.BASE_URL || "").trim();
|
|
|
|
| 1039 |
clearInterval(state.hf.asyncPollInterval);
|
| 1040 |
// Clear job ID to prevent cancel attempts after completion
|
| 1041 |
state.hf.asyncJobId = null;
|
| 1042 |
+
setHfStatus("ready");
|
| 1043 |
resolve();
|
| 1044 |
} catch (err) {
|
| 1045 |
if (err && err.code === "VIDEO_PENDING") {
|
|
|
|
| 2028 |
setStatus("good", "READY · Reason complete (you can Engage)");
|
| 2029 |
log("Reason complete.", "g");
|
| 2030 |
|
| 2031 |
+
// Pre-seed tracks for Tab 2 so radar shows targets immediately
|
| 2032 |
+
seedTracksFromTab1();
|
| 2033 |
+
renderRadar();
|
| 2034 |
+
|
| 2035 |
// Generate intel summary (async)
|
| 2036 |
computeIntelSummary();
|
| 2037 |
} catch (err) {
|
app.py
CHANGED
|
@@ -244,7 +244,7 @@ async def detect_endpoint(
|
|
| 244 |
query_list,
|
| 245 |
detector_name=detector_name,
|
| 246 |
depth_estimator_name="depth", # Synch endpoint default
|
| 247 |
-
depth_scale=
|
| 248 |
)
|
| 249 |
except ValueError as exc:
|
| 250 |
logging.exception("Video processing failed.")
|
|
@@ -278,7 +278,7 @@ async def detect_async_endpoint(
|
|
| 278 |
detector: str = Form("hf_yolov8"),
|
| 279 |
segmenter: str = Form("sam3"),
|
| 280 |
depth_estimator: str = Form("depth"),
|
| 281 |
-
depth_scale: float = Form(
|
| 282 |
):
|
| 283 |
if mode not in VALID_MODES:
|
| 284 |
raise HTTPException(
|
|
|
|
| 244 |
query_list,
|
| 245 |
detector_name=detector_name,
|
| 246 |
depth_estimator_name="depth", # Synch endpoint default
|
| 247 |
+
depth_scale=25.0,
|
| 248 |
)
|
| 249 |
except ValueError as exc:
|
| 250 |
logging.exception("Video processing failed.")
|
|
|
|
| 278 |
detector: str = Form("hf_yolov8"),
|
| 279 |
segmenter: str = Form("sam3"),
|
| 280 |
depth_estimator: str = Form("depth"),
|
| 281 |
+
depth_scale: float = Form(25.0),
|
| 282 |
):
|
| 283 |
if mode not in VALID_MODES:
|
| 284 |
raise HTTPException(
|
inference.py
CHANGED
|
@@ -173,7 +173,7 @@ def _build_detection_records(
|
|
| 173 |
|
| 174 |
_MODEL_LOCKS: Dict[str, RLock] = {}
|
| 175 |
_MODEL_LOCKS_GUARD = RLock()
|
| 176 |
-
_DEPTH_SCALE = float(os.getenv("DEPTH_SCALE", "
|
| 177 |
|
| 178 |
|
| 179 |
def _get_model_lock(kind: str, name: str) -> RLock:
|
|
@@ -438,6 +438,11 @@ def run_inference(
|
|
| 438 |
logging.info("Using detector: %s", active_detector)
|
| 439 |
|
| 440 |
# Detect GPUs
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 441 |
num_gpus = torch.cuda.device_count()
|
| 442 |
detectors = None
|
| 443 |
depth_estimators = None
|
|
|
|
| 173 |
|
| 174 |
_MODEL_LOCKS: Dict[str, RLock] = {}
|
| 175 |
_MODEL_LOCKS_GUARD = RLock()
|
| 176 |
+
_DEPTH_SCALE = float(os.getenv("DEPTH_SCALE", "25.0"))
|
| 177 |
|
| 178 |
|
| 179 |
def _get_model_lock(kind: str, name: str) -> RLock:
|
|
|
|
| 438 |
logging.info("Using detector: %s", active_detector)
|
| 439 |
|
| 440 |
# Detect GPUs
|
| 441 |
+
# Debug/Fix: Ensure internal restrictions don't hide GPUs
|
| 442 |
+
if "CUDA_VISIBLE_DEVICES" in os.environ:
|
| 443 |
+
logging.warning("Found CUDA_VISIBLE_DEVICES=%s in run_inference! Unsetting it.", os.environ["CUDA_VISIBLE_DEVICES"])
|
| 444 |
+
del os.environ["CUDA_VISIBLE_DEVICES"]
|
| 445 |
+
|
| 446 |
num_gpus = torch.cuda.device_count()
|
| 447 |
detectors = None
|
| 448 |
depth_estimators = None
|