jarondon82 committed on
Commit
b854a11
·
1 Parent(s): e92eeea

Corregir problemas de indentación en el try-except de la clase VideoProcessor

Browse files
Files changed (1) hide show
  1. streamlit_app.py +28 -34
streamlit_app.py CHANGED
@@ -2247,8 +2247,8 @@ def main():
2247
  scale_factor = 0.5
2248
  h, w = img.shape[:2]
2249
  small_img = safe_resize(img, (int(w * scale_factor), int(h * scale_factor)))
2250
- if small_img is None:
2251
- return frame
2252
 
2253
  # Detect faces - la función ahora devuelve directamente los bboxes
2254
  bboxes = detect_face_dnn(face_net, small_img, confidence_threshold)
@@ -2260,44 +2260,38 @@ def main():
2260
  int(x1 / scale_factor),
2261
  int(y1 / scale_factor),
2262
  int(x2 / scale_factor),
2263
- int(y2 / scale_factor),
2264
  conf
2265
  ))
2266
 
2267
- # Update face count in session state
2268
  self.face_count = len(original_bboxes)
2269
- if self.frame_count % 15 == 0: # Actualizar métricas con menos frecuencia
2270
- if 'webrtc_face_count' not in st.session_state:
2271
- st.session_state.webrtc_face_count = 0
2272
- st.session_state.webrtc_face_count = self.face_count
2273
-
2274
- if 'webrtc_fps' not in st.session_state:
2275
- st.session_state.webrtc_fps = 0
2276
- elapsed = time.time() - self.start_time
2277
- st.session_state.webrtc_fps = 15 / elapsed if elapsed > 0 else 0
2278
- self.start_time = time.time()
2279
 
2280
- # Recognize each face
2281
- result_frame = img.copy()
 
2282
 
2283
- # Simplificar - solo dibujar rectángulos para esta versión rápida
2284
- for i, bbox in enumerate(original_bboxes):
2285
- x1, y1, x2, y2, _ = bbox
2286
- cv2.rectangle(result_frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
2287
- cv2.putText(result_frame, f"Face {i+1}", (x1, y1-10),
2288
- cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
2289
 
2290
- return av.VideoFrame.from_ndarray(result_frame, format="bgr24")
2291
-
 
 
 
 
 
2292
  except Exception as e:
2293
- # En caso de cualquier error, mostrar mensaje en la imagen
2294
- error_frame = np.ones((480, 640, 3), dtype=np.uint8) * 255
2295
- error_msg = f"Error: {str(e)}"
2296
- cv2.putText(error_frame, error_msg[:50], (20, 240),
2297
- cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
2298
- cv2.putText(error_frame, "Intente usar opciones alternativas", (20, 280),
2299
- cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
2300
- return av.VideoFrame.from_ndarray(error_frame, format="bgr24")
2301
 
2302
  # Display WebRTC streamer
2303
  webrtc_ctx = webrtc_streamer(
@@ -2317,8 +2311,8 @@ def main():
2317
  # Update metrics if WebRTC is running
2318
  if webrtc_ctx.state.playing:
2319
  # Use a separate thread to update metrics
2320
- faces_metric.metric("Faces detected", st.session_state.get('webrtc_face_count', 0))
2321
- fps_metric.metric("FPS", f"{st.session_state.get('webrtc_fps', 0):.1f}")
2322
  time_metric.metric("Status", "Running")
2323
 
2324
  # Add instructions
 
2247
  scale_factor = 0.5
2248
  h, w = img.shape[:2]
2249
  small_img = safe_resize(img, (int(w * scale_factor), int(h * scale_factor)))
2250
+ if small_img is None:
2251
+ return av.VideoFrame.from_ndarray(img, format="bgr24")
2252
 
2253
  # Detect faces - la función ahora devuelve directamente los bboxes
2254
  bboxes = detect_face_dnn(face_net, small_img, confidence_threshold)
 
2260
  int(x1 / scale_factor),
2261
  int(y1 / scale_factor),
2262
  int(x2 / scale_factor),
2263
+ int(y2 / scale_factor),
2264
  conf
2265
  ))
2266
 
2267
+ # Actualizar contadores
2268
  self.face_count = len(original_bboxes)
2269
+ current_time = time.time()
2270
+ elapsed_time = current_time - self.start_time
2271
+ fps = self.frame_count / elapsed_time if elapsed_time > 0 else 0
 
 
 
 
 
 
 
2272
 
2273
+ # Actualizar métricas en session_state para que sean accesibles fuera
2274
+ st.session_state.faces_detected = self.face_count
2275
+ st.session_state.fps = fps
2276
 
2277
+ # Dibujar cajas de los rostros
2278
+ result_img = img.copy()
2279
+ for i, (x1, y1, x2, y2, conf) in enumerate(original_bboxes):
2280
+ cv2.rectangle(result_img, (x1, y1), (x2, y2), (0, 255, 0), 2)
2281
+ cv2.putText(result_img, f"Face {i+1}: {conf:.2f}", (x1, y1-10),
2282
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
2283
 
2284
+ # Añadir información FPS y rostros
2285
+ cv2.putText(result_img, f"FPS: {fps:.1f}", (10, 30),
2286
+ cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
2287
+ cv2.putText(result_img, f"Faces: {self.face_count}", (10, 60),
2288
+ cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
2289
+
2290
+ return av.VideoFrame.from_ndarray(result_img, format="bgr24")
2291
  except Exception as e:
2292
+ print(f"Error en procesamiento de video: {str(e)}")
2293
+ # En caso de error, devolver el frame original
2294
+ return av.VideoFrame.from_ndarray(img, format="bgr24")
 
 
 
 
 
2295
 
2296
  # Display WebRTC streamer
2297
  webrtc_ctx = webrtc_streamer(
 
2311
  # Update metrics if WebRTC is running
2312
  if webrtc_ctx.state.playing:
2313
  # Use a separate thread to update metrics
2314
+ faces_metric.metric("Faces detected", st.session_state.get('faces_detected', 0))
2315
+ fps_metric.metric("FPS", f"{st.session_state.get('fps', 0):.1f}")
2316
  time_metric.metric("Status", "Running")
2317
 
2318
  # Add instructions