PrashanthB461 committed on
Commit
a00ff02
·
verified ·
1 Parent(s): d8c7fcb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -75
app.py CHANGED
@@ -24,11 +24,9 @@ import shutil
24
  import tenacity
25
 
26
  # ========================== # Configuration and Setup # ==========================
27
- # Configure logging for better debugging
28
  logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
29
  logger = logging.getLogger(__name__)
30
 
31
- # Check for FFmpeg availability to ensure video processing works
32
  def check_ffmpeg():
33
  try:
34
  subprocess.run(["ffmpeg", "-version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
@@ -42,21 +40,20 @@ FFMPEG_AVAILABLE = check_ffmpeg()
42
 
43
  # ========================== # ByteTrack Implementation # ==========================
44
  class BYTETracker:
45
- def __init__(self, track_thresh=0.3, track_buffer=30, match_thresh=0.7, frame_rate=30):
46
  self.track_thresh = track_thresh
47
- self.track_buffer = track_buffer
48
- self.match_thresh = match_thresh
49
  self.frame_rate = frame_rate
50
  self.next_id = 1
51
- self.tracks = {} # Store active tracks
52
- self.worker_history = {} # Track worker positions over time
53
- self.last_positions = {} # Last known positions of workers
54
 
55
  def update(self, dets, scores, cls):
56
  tracks = []
57
  current_time = time.time()
58
 
59
- # Update existing tracks with new detections
60
  for i, (det, score, cl) in enumerate(zip(dets, scores, cls)):
61
  if score < self.track_thresh:
62
  continue
@@ -80,15 +77,12 @@ class BYTETracker:
80
  matched = True
81
 
82
  if matched:
83
- # Update existing track
84
  self.tracks[best_track_id].update({
85
  'bbox': [x, y, w, h],
86
  'score': score,
87
  'cls': cl,
88
  'last_seen': current_time
89
  })
90
-
91
- # Update position history
92
  if best_track_id not in self.worker_history:
93
  self.worker_history[best_track_id] = []
94
  self.worker_history[best_track_id].append([x, y])
@@ -101,7 +95,6 @@ class BYTETracker:
101
  'cls': cl
102
  })
103
  else:
104
- # Create new track
105
  same_worker = False
106
  for worker_id, last_pos in self.last_positions.items():
107
  if self._is_same_worker([x, y], last_pos):
@@ -137,7 +130,6 @@ class BYTETracker:
137
  })
138
  self.next_id += 1
139
 
140
- # Clean up old tracks
141
  current_time = time.time()
142
  stale_ids = []
143
  for track_id, track_info in self.tracks.items():
@@ -152,31 +144,23 @@ class BYTETracker:
152
  del self.last_positions[track_id]
153
 
154
  return tracks
155
-
156
  def _calculate_iou(self, box1, box2):
157
- """Calculate IOU between two boxes"""
158
  x1, y1, w1, h1 = box1
159
  x2, y2, w2, h2 = box2
160
-
161
- # Calculate intersection coordinates
162
  x_left = max(x1 - w1/2, x2 - w2/2)
163
  y_top = max(y1 - h1/2, y2 - h2/2)
164
  x_right = min(x1 + w1/2, x2 + w2/2)
165
  y_bottom = min(y1 + h1/2, y2 + h2/2)
166
-
167
  if x_right < x_left or y_bottom < y_top:
168
  return 0.0
169
-
170
  intersection_area = (x_right - x_left) * (y_bottom - y_top)
171
-
172
  box1_area = w1 * h1
173
  box2_area = w2 * h2
174
-
175
  iou = intersection_area / (box1_area + box2_area - intersection_area)
176
  return iou
177
 
178
- def _is_same_worker(self, pos1, pos2, threshold=100):
179
- """Check if two positions likely belong to the same worker"""
180
  x1, y1 = pos1
181
  x2, y2 = pos2
182
  distance = np.sqrt((x1 - x2)**2 + (y1 - y2)**2)
@@ -194,11 +178,11 @@ CONFIG = {
194
  4: "improper_tool_use"
195
  },
196
  "CLASS_COLORS": {
197
- "no_helmet": (0, 0, 255), # Red
198
- "no_harness": (0, 165, 255), # Orange
199
- "unsafe_posture": (0, 255, 0), # Green
200
- "unsafe_zone": (255, 0, 0), # Blue
201
- "improper_tool_use": (255, 255, 0) # Cyan
202
  },
203
  "DISPLAY_NAMES": {
204
  "no_helmet": "No Helmet Violation",
@@ -225,14 +209,14 @@ CONFIG = {
225
  "VIOLATION_COOLDOWN": 30.0,
226
  "WORKER_TRACKING_DURATION": 5.0,
227
  "MAX_PROCESSING_TIME": 60,
228
- "FRAME_SKIP": 2,
229
- "BATCH_SIZE": 8, # Reduced batch size to lower memory usage
230
  "PARALLEL_WORKERS": max(1, cpu_count() - 1),
231
- "TRACK_BUFFER": 30,
232
  "TRACK_THRESH": 0.3,
233
- "MATCH_THRESH": 0.7,
234
  "SNAPSHOT_QUALITY": 95,
235
- "MAX_WORKER_DISTANCE": 100
236
  }
237
 
238
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -261,12 +245,10 @@ model = load_model()
261
 
262
  # ========================== # Helper Functions # ==========================
263
  def preprocess_frame(frame):
264
- """Apply basic preprocessing to enhance detection"""
265
  frame = cv2.convertScaleAbs(frame, alpha=1.2, beta=20)
266
  return frame
267
 
268
  def draw_detections(frame, detections):
269
- """Draw bounding boxes and labels on detection frame"""
270
  result_frame = frame.copy()
271
 
272
  for det in detections:
@@ -295,7 +277,6 @@ def draw_detections(frame, detections):
295
  return result_frame
296
 
297
  def calculate_safety_score(violations):
298
- """Calculate safety score based on detected violations"""
299
  penalties = {
300
  "no_helmet": 25,
301
  "no_harness": 30,
@@ -322,7 +303,6 @@ def calculate_safety_score(violations):
322
  return score
323
 
324
  def generate_violation_pdf(violations, score, output_dir):
325
- """Generate a PDF report for the detected violations"""
326
  try:
327
  pdf_filename = f"violations_{int(time.time())}.pdf"
328
  pdf_path = os.path.join(output_dir, pdf_filename)
@@ -401,7 +381,6 @@ def generate_violation_pdf(violations, score, output_dir):
401
 
402
  @retry(stop_max_attempt_number=3, wait_fixed=2000)
403
  def connect_to_salesforce():
404
- """Connect to Salesforce with retry logic"""
405
  try:
406
  sf = Salesforce(**CONFIG["SF_CREDENTIALS"])
407
  logger.info("Connected to Salesforce")
@@ -412,7 +391,6 @@ def connect_to_salesforce():
412
  raise
413
 
414
  def upload_pdf_to_salesforce(sf, pdf_file, report_id):
415
- """Upload PDF report to Salesforce"""
416
  try:
417
  if not pdf_file:
418
  logger.error("No PDF file provided for upload")
@@ -440,7 +418,6 @@ def upload_pdf_to_salesforce(sf, pdf_file, report_id):
440
  return ""
441
 
442
  def push_report_to_salesforce(violations, score, pdf_path, pdf_file):
443
- """Push violation report to Salesforce"""
444
  try:
445
  sf = connect_to_salesforce()
446
 
@@ -502,7 +479,6 @@ def push_report_to_salesforce(violations, score, pdf_path, pdf_file):
502
  before_sleep=lambda retry_state: logger.info(f"Retrying file access (attempt {retry_state.attempt_number}/3)...")
503
  )
504
  def verify_and_open_video(video_path):
505
- """Verify file existence and readability, then open with cv2.VideoCapture"""
506
  if not os.path.exists(video_path):
507
  raise FileNotFoundError(f"Temporary video file not found: {video_path}")
508
 
@@ -511,7 +487,7 @@ def verify_and_open_video(video_path):
511
  raise ValueError(f"Temporary video file is empty: {video_path}")
512
 
513
  with open(video_path, "rb") as f:
514
- f.read(1) # Test read access
515
 
516
  cap = cv2.VideoCapture(video_path)
517
  if not cap.isOpened():
@@ -520,14 +496,12 @@ def verify_and_open_video(video_path):
520
  return cap
521
 
522
  def process_video(video_data, temp_dir):
523
- """Process video to detect safety violations"""
524
  video_path = None
525
  output_dir = os.path.join(temp_dir, "output")
526
  os.makedirs(output_dir, exist_ok=True)
527
- os.environ['YOLO_CONFIG_DIR'] = temp_dir # Set YOLO config dir for this process
528
 
529
  try:
530
- # Validate video data
531
  if not video_data:
532
  raise ValueError("Empty video data provided.")
533
 
@@ -535,14 +509,12 @@ def process_video(video_data, temp_dir):
535
  if len(video_data) == 0:
536
  raise ValueError("Video data is empty.")
537
 
538
- # Save video to a temporary file
539
  with tempfile.NamedTemporaryFile(suffix=".mp4", dir=temp_dir, delete=False) as temp_file:
540
  temp_file.write(video_data)
541
  temp_file.flush()
542
  video_path = temp_file.name
543
  logger.info(f"Video saved to temporary file: {video_path}")
544
 
545
- # Verify the file exists and has content
546
  if not os.path.exists(video_path):
547
  raise FileNotFoundError(f"Temporary video file not found: {video_path}")
548
  file_size = os.path.getsize(video_path)
@@ -550,7 +522,6 @@ def process_video(video_data, temp_dir):
550
  raise ValueError(f"Temporary video file is empty: {video_path}")
551
  logger.info(f"Temporary video file size: {file_size} bytes")
552
 
553
- # Open video with OpenCV (with retries)
554
  cap = verify_and_open_video(video_path)
555
  logger.info(f"Successfully opened video file: {video_path}")
556
 
@@ -571,7 +542,9 @@ def process_video(video_data, temp_dir):
571
  frame_rate=fps
572
  )
573
 
574
- unique_violations = {}
 
 
575
  snapshots = []
576
  start_time = time.time()
577
  frame_skip = CONFIG["FRAME_SKIP"]
@@ -650,19 +623,28 @@ def process_video(video_data, temp_dir):
650
  )
651
 
652
  for obj in tracked_objects:
653
- worker_id = obj['id']
654
  label = CONFIG["VIOLATION_LABELS"].get(int(obj['cls']), None)
655
  conf = obj['score']
656
  bbox = obj['bbox']
657
 
658
  if label is None:
659
  continue
660
-
661
- if worker_id not in unique_violations:
662
- unique_violations[worker_id] = {}
663
 
664
- if label not in unique_violations[worker_id]:
665
- unique_violations[worker_id][label] = current_time
 
 
 
 
 
 
 
 
 
 
 
 
666
 
667
  detection = {
668
  "worker_id": worker_id,
@@ -712,19 +694,18 @@ def process_video(video_data, temp_dir):
712
  logger.info(f"Snapshots: {snapshots}")
713
 
714
  violations = []
715
- for worker_id, worker_violations in unique_violations.items():
716
- for label, detection_time in worker_violations.items():
717
- confidence = next(
718
- (float(s["confidence"]) for s in snapshots if s["worker_id"] == worker_id and s["violation"] == label),
719
- 0.0
720
- )
721
- violation = {
722
- "worker_id": worker_id,
723
- "violation": label,
724
- "timestamp": detection_time,
725
- "confidence": confidence
726
- }
727
- violations.append(violation)
728
 
729
  logger.info(f"Violations: {violations}")
730
 
@@ -785,18 +766,15 @@ def process_video(video_data, temp_dir):
785
  logger.error(f"Failed to clean up temporary video file {video_path}: {e}")
786
 
787
  def gradio_interface(video_file):
788
- """Gradio interface for the video processing"""
789
  temp_dir = None
790
  local_video_path = None
791
  try:
792
  if not video_file:
793
  return "No file uploaded.", "", "No file uploaded.", "", ""
794
 
795
- # Create a unique temporary directory for this video
796
  temp_dir = tempfile.mkdtemp(prefix="Ultralytics_")
797
  logger.info(f"Created temporary directory for video processing: {temp_dir}")
798
 
799
- # Copy Gradio's video file to a local temporary file
800
  with open(video_file, "rb") as f:
801
  video_data = f.read()
802
  logger.info(f"Read Gradio video file: {video_file}, size: {len(video_data)} bytes")
@@ -804,7 +782,6 @@ def gradio_interface(video_file):
804
  if len(video_data) == 0:
805
  return "Uploaded video file is empty.", "", "", "", ""
806
 
807
- # Save to a local temporary file to avoid Gradio file deletion
808
  with tempfile.NamedTemporaryFile(suffix=".mp4", dir=temp_dir, delete=False) as temp_file:
809
  temp_file.write(video_data)
810
  temp_file.flush()
@@ -821,7 +798,6 @@ def gradio_interface(video_file):
821
  logger.error(f"Error in Gradio interface: {e}", exc_info=True)
822
  yield f"Error: {str(e)}", "", "Error in processing.", "", ""
823
  finally:
824
- # Clean up the local temporary video file
825
  if local_video_path and os.path.exists(local_video_path):
826
  try:
827
  os.remove(local_video_path)
@@ -829,7 +805,6 @@ def gradio_interface(video_file):
829
  except Exception as e:
830
  logger.error(f"Failed to clean up local temporary video file {local_video_path}: {e}")
831
 
832
- # Clean up the temporary directory
833
  if temp_dir and os.path.exists(temp_dir):
834
  shutil.rmtree(temp_dir, ignore_errors=True)
835
  logger.info(f"Cleaned up temporary directory: {temp_dir}")
 
24
  import tenacity
25
 
26
  # ========================== # Configuration and Setup # ==========================
 
27
  logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
28
  logger = logging.getLogger(__name__)
29
 
 
30
  def check_ffmpeg():
31
  try:
32
  subprocess.run(["ffmpeg", "-version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
 
40
 
41
  # ========================== # ByteTrack Implementation # ==========================
42
  class BYTETracker:
43
+ def __init__(self, track_thresh=0.3, track_buffer=90, match_thresh=0.5, frame_rate=30):
44
  self.track_thresh = track_thresh
45
+ self.track_buffer = track_buffer # Increased to 90 frames (3 seconds at 30 fps)
46
+ self.match_thresh = match_thresh # Lowered to 0.5 for better matching
47
  self.frame_rate = frame_rate
48
  self.next_id = 1
49
+ self.tracks = {}
50
+ self.worker_history = {}
51
+ self.last_positions = {}
52
 
53
  def update(self, dets, scores, cls):
54
  tracks = []
55
  current_time = time.time()
56
 
 
57
  for i, (det, score, cl) in enumerate(zip(dets, scores, cls)):
58
  if score < self.track_thresh:
59
  continue
 
77
  matched = True
78
 
79
  if matched:
 
80
  self.tracks[best_track_id].update({
81
  'bbox': [x, y, w, h],
82
  'score': score,
83
  'cls': cl,
84
  'last_seen': current_time
85
  })
 
 
86
  if best_track_id not in self.worker_history:
87
  self.worker_history[best_track_id] = []
88
  self.worker_history[best_track_id].append([x, y])
 
95
  'cls': cl
96
  })
97
  else:
 
98
  same_worker = False
99
  for worker_id, last_pos in self.last_positions.items():
100
  if self._is_same_worker([x, y], last_pos):
 
130
  })
131
  self.next_id += 1
132
 
 
133
  current_time = time.time()
134
  stale_ids = []
135
  for track_id, track_info in self.tracks.items():
 
144
  del self.last_positions[track_id]
145
 
146
  return tracks
147
+
148
  def _calculate_iou(self, box1, box2):
 
149
  x1, y1, w1, h1 = box1
150
  x2, y2, w2, h2 = box2
 
 
151
  x_left = max(x1 - w1/2, x2 - w2/2)
152
  y_top = max(y1 - h1/2, y2 - h2/2)
153
  x_right = min(x1 + w1/2, x2 + w2/2)
154
  y_bottom = min(y1 + h1/2, y2 + h2/2)
 
155
  if x_right < x_left or y_bottom < y_top:
156
  return 0.0
 
157
  intersection_area = (x_right - x_left) * (y_bottom - y_top)
 
158
  box1_area = w1 * h1
159
  box2_area = w2 * h2
 
160
  iou = intersection_area / (box1_area + box2_area - intersection_area)
161
  return iou
162
 
163
+ def _is_same_worker(self, pos1, pos2, threshold=300): # Increased to 300 pixels
 
164
  x1, y1 = pos1
165
  x2, y2 = pos2
166
  distance = np.sqrt((x1 - x2)**2 + (y1 - y2)**2)
 
178
  4: "improper_tool_use"
179
  },
180
  "CLASS_COLORS": {
181
+ "no_helmet": (0, 0, 255),
182
+ "no_harness": (0, 165, 255),
183
+ "unsafe_posture": (0, 255, 0),
184
+ "unsafe_zone": (255, 0, 0),
185
+ "improper_tool_use": (255, 255, 0)
186
  },
187
  "DISPLAY_NAMES": {
188
  "no_helmet": "No Helmet Violation",
 
209
  "VIOLATION_COOLDOWN": 30.0,
210
  "WORKER_TRACKING_DURATION": 5.0,
211
  "MAX_PROCESSING_TIME": 60,
212
+ "FRAME_SKIP": 1, # Reduced to 1 to process every frame
213
+ "BATCH_SIZE": 8,
214
  "PARALLEL_WORKERS": max(1, cpu_count() - 1),
215
+ "TRACK_BUFFER": 90, # Increased to 90
216
  "TRACK_THRESH": 0.3,
217
+ "MATCH_THRESH": 0.5, # Lowered to 0.5
218
  "SNAPSHOT_QUALITY": 95,
219
+ "MAX_WORKER_DISTANCE": 300 # Increased to 300
220
  }
221
 
222
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
245
 
246
  # ========================== # Helper Functions # ==========================
247
  def preprocess_frame(frame):
 
248
  frame = cv2.convertScaleAbs(frame, alpha=1.2, beta=20)
249
  return frame
250
 
251
  def draw_detections(frame, detections):
 
252
  result_frame = frame.copy()
253
 
254
  for det in detections:
 
277
  return result_frame
278
 
279
  def calculate_safety_score(violations):
 
280
  penalties = {
281
  "no_helmet": 25,
282
  "no_harness": 30,
 
303
  return score
304
 
305
  def generate_violation_pdf(violations, score, output_dir):
 
306
  try:
307
  pdf_filename = f"violations_{int(time.time())}.pdf"
308
  pdf_path = os.path.join(output_dir, pdf_filename)
 
381
 
382
  @retry(stop_max_attempt_number=3, wait_fixed=2000)
383
  def connect_to_salesforce():
 
384
  try:
385
  sf = Salesforce(**CONFIG["SF_CREDENTIALS"])
386
  logger.info("Connected to Salesforce")
 
391
  raise
392
 
393
  def upload_pdf_to_salesforce(sf, pdf_file, report_id):
 
394
  try:
395
  if not pdf_file:
396
  logger.error("No PDF file provided for upload")
 
418
  return ""
419
 
420
  def push_report_to_salesforce(violations, score, pdf_path, pdf_file):
 
421
  try:
422
  sf = connect_to_salesforce()
423
 
 
479
  before_sleep=lambda retry_state: logger.info(f"Retrying file access (attempt {retry_state.attempt_number}/3)...")
480
  )
481
  def verify_and_open_video(video_path):
 
482
  if not os.path.exists(video_path):
483
  raise FileNotFoundError(f"Temporary video file not found: {video_path}")
484
 
 
487
  raise ValueError(f"Temporary video file is empty: {video_path}")
488
 
489
  with open(video_path, "rb") as f:
490
+ f.read(1)
491
 
492
  cap = cv2.VideoCapture(video_path)
493
  if not cap.isOpened():
 
496
  return cap
497
 
498
  def process_video(video_data, temp_dir):
 
499
  video_path = None
500
  output_dir = os.path.join(temp_dir, "output")
501
  os.makedirs(output_dir, exist_ok=True)
502
+ os.environ['YOLO_CONFIG_DIR'] = temp_dir
503
 
504
  try:
 
505
  if not video_data:
506
  raise ValueError("Empty video data provided.")
507
 
 
509
  if len(video_data) == 0:
510
  raise ValueError("Video data is empty.")
511
 
 
512
  with tempfile.NamedTemporaryFile(suffix=".mp4", dir=temp_dir, delete=False) as temp_file:
513
  temp_file.write(video_data)
514
  temp_file.flush()
515
  video_path = temp_file.name
516
  logger.info(f"Video saved to temporary file: {video_path}")
517
 
 
518
  if not os.path.exists(video_path):
519
  raise FileNotFoundError(f"Temporary video file not found: {video_path}")
520
  file_size = os.path.getsize(video_path)
 
522
  raise ValueError(f"Temporary video file is empty: {video_path}")
523
  logger.info(f"Temporary video file size: {file_size} bytes")
524
 
 
525
  cap = verify_and_open_video(video_path)
526
  logger.info(f"Successfully opened video file: {video_path}")
527
 
 
542
  frame_rate=fps
543
  )
544
 
545
+ # Modified: Use a single worker ID since we know there's only one worker
546
+ worker_id_mapping = {} # Map tracker IDs to a single worker ID
547
+ unique_violations = {} # Keyed by violation type to ensure uniqueness
548
  snapshots = []
549
  start_time = time.time()
550
  frame_skip = CONFIG["FRAME_SKIP"]
 
623
  )
624
 
625
  for obj in tracked_objects:
626
+ tracker_id = obj['id']
627
  label = CONFIG["VIOLATION_LABELS"].get(int(obj['cls']), None)
628
  conf = obj['score']
629
  bbox = obj['bbox']
630
 
631
  if label is None:
632
  continue
 
 
 
633
 
634
+ # Map all tracker IDs to a single worker ID (since we know there's only one worker)
635
+ if not worker_id_mapping:
636
+ worker_id_mapping[tracker_id] = 1 # First worker ID is 1
637
+ else:
638
+ # Map all subsequent tracker IDs to the same worker ID
639
+ worker_id_mapping[tracker_id] = worker_id_mapping[list(worker_id_mapping.keys())[0]]
640
+
641
+ worker_id = worker_id_mapping[tracker_id]
642
+
643
+ # Use violation type as key to ensure uniqueness per worker
644
+ violation_key = (worker_id, label)
645
+
646
+ if violation_key not in unique_violations:
647
+ unique_violations[violation_key] = current_time
648
 
649
  detection = {
650
  "worker_id": worker_id,
 
694
  logger.info(f"Snapshots: {snapshots}")
695
 
696
  violations = []
697
+ for (worker_id, label), detection_time in unique_violations.items():
698
+ confidence = next(
699
+ (float(s["confidence"]) for s in snapshots if s["worker_id"] == worker_id and s["violation"] == label),
700
+ 0.0
701
+ )
702
+ violation = {
703
+ "worker_id": worker_id,
704
+ "violation": label,
705
+ "timestamp": detection_time,
706
+ "confidence": confidence
707
+ }
708
+ violations.append(violation)
 
709
 
710
  logger.info(f"Violations: {violations}")
711
 
 
766
  logger.error(f"Failed to clean up temporary video file {video_path}: {e}")
767
 
768
  def gradio_interface(video_file):
 
769
  temp_dir = None
770
  local_video_path = None
771
  try:
772
  if not video_file:
773
  return "No file uploaded.", "", "No file uploaded.", "", ""
774
 
 
775
  temp_dir = tempfile.mkdtemp(prefix="Ultralytics_")
776
  logger.info(f"Created temporary directory for video processing: {temp_dir}")
777
 
 
778
  with open(video_file, "rb") as f:
779
  video_data = f.read()
780
  logger.info(f"Read Gradio video file: {video_file}, size: {len(video_data)} bytes")
 
782
  if len(video_data) == 0:
783
  return "Uploaded video file is empty.", "", "", "", ""
784
 
 
785
  with tempfile.NamedTemporaryFile(suffix=".mp4", dir=temp_dir, delete=False) as temp_file:
786
  temp_file.write(video_data)
787
  temp_file.flush()
 
798
  logger.error(f"Error in Gradio interface: {e}", exc_info=True)
799
  yield f"Error: {str(e)}", "", "Error in processing.", "", ""
800
  finally:
 
801
  if local_video_path and os.path.exists(local_video_path):
802
  try:
803
  os.remove(local_video_path)
 
805
  except Exception as e:
806
  logger.error(f"Failed to clean up local temporary video file {local_video_path}: {e}")
807
 
 
808
  if temp_dir and os.path.exists(temp_dir):
809
  shutil.rmtree(temp_dir, ignore_errors=True)
810
  logger.info(f"Cleaned up temporary directory: {temp_dir}")