Zhen Ye committed on
Commit
78f99f1
·
1 Parent(s): 6144c58

fixed overflow

Browse files
Files changed (2) hide show
  1. inference.py +21 -2
  2. jobs/background.py +2 -1
inference.py CHANGED
@@ -354,6 +354,7 @@ def run_depth_inference(
354
  output_video_path: str,
355
  max_frames: Optional[int] = None,
356
  depth_estimator_name: str = "depth_pro",
 
357
  job_id: Optional[str] = None,
358
  ) -> str:
359
  """
@@ -364,6 +365,7 @@ def run_depth_inference(
364
  output_video_path: Path to write depth visualization video
365
  max_frames: Optional frame limit for testing
366
  depth_estimator_name: Depth estimator to use (default: depth_pro)
 
367
  job_id: Optional job ID for cancellation support
368
 
369
  Returns:
@@ -388,6 +390,12 @@ def run_depth_inference(
388
  write_video(processed_frames, output_video_path, fps=fps, width=width, height=height)
389
  logging.info("Depth video written to: %s", output_video_path)
390
 
 
 
 
 
 
 
391
  return output_video_path
392
 
393
 
@@ -442,6 +450,7 @@ def process_frames_depth(
442
  global_min = 0.0
443
  global_max = 1.0
444
  else:
 
445
  global_min = float(np.percentile(valid_depths, 1)) # 1st percentile to clip outliers
446
  global_max = float(np.percentile(valid_depths, 99)) # 99th percentile
447
 
@@ -493,9 +502,19 @@ def colorize_depth_map(
493
  """
494
  import cv2
495
 
496
- # Replace NaN/inf with min value for visualization
497
  depth_clean = np.copy(depth_map)
498
- depth_clean[~np.isfinite(depth_clean)] = global_min
 
 
 
 
 
 
 
 
 
 
 
499
 
500
  if global_max - global_min < 1e-6: # Handle uniform depth
501
  depth_norm = np.zeros_like(depth_clean, dtype=np.uint8)
 
354
  output_video_path: str,
355
  max_frames: Optional[int] = None,
356
  depth_estimator_name: str = "depth_pro",
357
+ first_frame_depth_path: Optional[str] = None,
358
  job_id: Optional[str] = None,
359
  ) -> str:
360
  """
 
365
  output_video_path: Path to write depth visualization video
366
  max_frames: Optional frame limit for testing
367
  depth_estimator_name: Depth estimator to use (default: depth_pro)
368
+ first_frame_depth_path: Optional path to save the first depth visualization frame
369
  job_id: Optional job ID for cancellation support
370
 
371
  Returns:
 
390
  write_video(processed_frames, output_video_path, fps=fps, width=width, height=height)
391
  logging.info("Depth video written to: %s", output_video_path)
392
 
393
+ if first_frame_depth_path and processed_frames:
394
+ import cv2
395
+
396
+ if not cv2.imwrite(first_frame_depth_path, processed_frames[0]):
397
+ logging.warning("Failed to write first frame depth image to: %s", first_frame_depth_path)
398
+
399
  return output_video_path
400
 
401
 
 
450
  global_min = 0.0
451
  global_max = 1.0
452
  else:
453
+ valid_depths = valid_depths.astype(np.float64, copy=False)
454
  global_min = float(np.percentile(valid_depths, 1)) # 1st percentile to clip outliers
455
  global_max = float(np.percentile(valid_depths, 99)) # 99th percentile
456
 
 
502
  """
503
  import cv2
504
 
 
505
  depth_clean = np.copy(depth_map)
506
+ finite_mask = np.isfinite(depth_clean)
507
+ if not np.isfinite(global_min) or not np.isfinite(global_max):
508
+ if finite_mask.any():
509
+ local_depths = depth_clean[finite_mask].astype(np.float64, copy=False)
510
+ global_min = float(np.percentile(local_depths, 1))
511
+ global_max = float(np.percentile(local_depths, 99))
512
+ else:
513
+ global_min = 0.0
514
+ global_max = 1.0
515
+
516
+ # Replace NaN/inf with min value for visualization
517
+ depth_clean[~finite_mask] = global_min
518
 
519
  if global_max - global_min < 1e-6: # Handle uniform depth
520
  depth_norm = np.zeros_like(depth_clean, dtype=np.uint8)
jobs/background.py CHANGED
@@ -5,7 +5,7 @@ from datetime import datetime
5
  import torch
6
 
7
  from jobs.models import JobStatus
8
- from jobs.storage import get_job_storage, get_depth_output_path
9
  from inference import run_inference, run_segmentation, run_depth_inference
10
 
11
 
@@ -51,6 +51,7 @@ async def process_video_async(job_id: str) -> None:
51
  str(get_depth_output_path(job_id)),
52
  None, # max_frames
53
  job.depth_estimator_name,
 
54
  job_id,
55
  )
56
  logging.info("Depth estimation completed for job %s", job_id)
 
5
  import torch
6
 
7
  from jobs.models import JobStatus
8
+ from jobs.storage import get_job_storage, get_depth_output_path, get_first_frame_depth_path
9
  from inference import run_inference, run_segmentation, run_depth_inference
10
 
11
 
 
51
  str(get_depth_output_path(job_id)),
52
  None, # max_frames
53
  job.depth_estimator_name,
54
+ str(get_first_frame_depth_path(job_id)),
55
  job_id,
56
  )
57
  logging.info("Depth estimation completed for job %s", job_id)