htrnguyen committed on
Commit
964ccb3
·
1 Parent(s): 97e8589

Suppress all debug logs in production mode

Browse files
Files changed (4) hide show
  1. api_server.py +1 -1
  2. extract.py +24 -13
  3. main.py +3 -2
  4. reengineer.py +13 -5
api_server.py CHANGED
@@ -49,7 +49,7 @@ async def analyze_with_video(file: UploadFile = File(...), background_tasks: Bac
49
 
50
  # Tạo video có overlay
51
  output_video = os.path.join(output_dir, "analyzed_video.mp4")
52
- reengineer_video(master_json, video_path, output_video)
53
 
54
  # Cleanup video gốc
55
  if os.path.exists(video_path):
 
49
 
50
  # Tạo video có overlay
51
  output_video = os.path.join(output_dir, "analyzed_video.mp4")
52
+ reengineer_video(master_json, video_path, output_video, production=True)
53
 
54
  # Cleanup video gốc
55
  if os.path.exists(video_path):
extract.py CHANGED
@@ -25,7 +25,7 @@ def smooth_probs(probs, window_size=5):
25
  # Chia lại để tổng xác suất mỗi frame = 1
26
  return smoothed / smoothed.sum(axis=1, keepdims=True)
27
 
28
- def run_ai_extraction(video_path, slow_factor=1.0, output_dir=None, skip_slow_video=False, skip_phase_images=False, return_dict=False):
29
  if not os.path.exists(video_path):
30
  print(f"Lỗi: Không tìm thấy file {video_path}")
31
  return
@@ -55,11 +55,13 @@ def run_ai_extraction(video_path, slow_factor=1.0, output_dir=None, skip_slow_vi
55
  save_dict = torch.load(model_path, map_location=device)
56
  model.load_state_dict(save_dict['model_state_dict'])
57
  model.to(device).eval()
58
- print(f"Mô hình đã sẵn sàng trên {device} (Slow factor: {slow_factor})")
 
59
 
60
  transform = transforms.Compose([ToTensor(), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
61
 
62
- print(f"Đang đọc video {video_path}...")
 
63
  cap = cv2.VideoCapture(video_path)
64
  raw_frames = []
65
  while True:
@@ -74,7 +76,8 @@ def run_ai_extraction(video_path, slow_factor=1.0, output_dir=None, skip_slow_vi
74
 
75
  # Frame Interpolation (Nội suy tuyến tính)
76
  if slow_factor < 1.0:
77
- print(f"Đang nội suy frame (Slow-mo {slow_factor}x)...")
 
78
  steps = int(1.0 / slow_factor)
79
  interpolated_full_res = []
80
  for i in range(len(raw_frames) - 1):
@@ -90,7 +93,8 @@ def run_ai_extraction(video_path, slow_factor=1.0, output_dir=None, skip_slow_vi
90
  # Lưu file video slow-motion vật lý (optional)
91
  if not skip_slow_video:
92
  slow_video_path = os.path.join(output_dir, "slow_motion.mp4")
93
- print(f"Đang ghi file slow motion: {slow_video_path}")
 
94
 
95
  # Lấy lại FPS gốc (hoặc mặc định 30)
96
  cap = cv2.VideoCapture(video_path)
@@ -105,16 +109,18 @@ def run_ai_extraction(video_path, slow_factor=1.0, output_dir=None, skip_slow_vi
105
  for frame in full_res_frames:
106
  out.write(frame)
107
  out.release()
108
- print(f"Đã tạo video slow motion: {slow_video_path}")
 
109
 
110
  else:
111
  full_res_frames = raw_frames
112
 
113
  # Tiền xử lý cho AI
114
- print("Đang tiền xử lý cho AI...")
 
115
  images = []
116
  input_size = 160
117
- for img in tqdm(full_res_frames):
118
  h, w = img.shape[:2]
119
  ratio = input_size / max(h, w)
120
  new_size = (int(w * ratio), int(h * ratio))
@@ -127,7 +133,8 @@ def run_ai_extraction(video_path, slow_factor=1.0, output_dir=None, skip_slow_vi
127
  sample = transform({'images': np.asarray(images), 'labels': np.zeros(len(images))})
128
  img_tensor = sample['images'].unsqueeze(0).to(device)
129
 
130
- print("Đang chạy AI Inference...")
 
131
  with torch.no_grad():
132
  seq_length, batch, all_logits = 48, 0, []
133
  while batch * seq_length < img_tensor.shape[1]:
@@ -149,7 +156,8 @@ def run_ai_extraction(video_path, slow_factor=1.0, output_dir=None, skip_slow_vi
149
  anchor_class = np.argmax(max_probs) # Class có độ tự tin cao nhất trong 8 sự kiện
150
  anchor_frame = np.argmax(probs_events[:, anchor_class])
151
 
152
- print(f"Detected Anchor: {labels[anchor_class]} at frame {anchor_frame} (conf: {max_probs[anchor_class]:.2f})")
 
153
 
154
  events = np.zeros(8, dtype=int)
155
  events[anchor_class] = anchor_frame
@@ -184,7 +192,8 @@ def run_ai_extraction(video_path, slow_factor=1.0, output_dir=None, skip_slow_vi
184
  events[i] = total_frames - 1
185
  current_limit = events[i]
186
 
187
- print(f"Detected Events (Frames): {events}")
 
188
 
189
  # Lưu thông tin frame index để visual_report sử dụng
190
  event_metadata = {}
@@ -205,13 +214,15 @@ def run_ai_extraction(video_path, slow_factor=1.0, output_dir=None, skip_slow_vi
205
 
206
  # Return dict nếu cần, hoặc ghi file (legacy)
207
  if return_dict:
208
- print("Extraction complete (dict mode)")
 
209
  return {"metadata": metadata}
210
  else:
211
  metadata_path = os.path.join(output_dir, "metadata.json")
212
  with open(metadata_path, 'w') as f:
213
  json.dump(metadata, f)
214
- print(f"Xong! Ảnh trích xuất lưu tại {phases_dir}")
 
215
  return metadata
216
 
217
  if __name__ == "__main__":
 
25
  # Chia lại để tổng xác suất mỗi frame = 1
26
  return smoothed / smoothed.sum(axis=1, keepdims=True)
27
 
28
+ def run_ai_extraction(video_path, slow_factor=1.0, output_dir=None, skip_slow_video=False, skip_phase_images=False, return_dict=False, production=False):
29
  if not os.path.exists(video_path):
30
  print(f"Lỗi: Không tìm thấy file {video_path}")
31
  return
 
55
  save_dict = torch.load(model_path, map_location=device)
56
  model.load_state_dict(save_dict['model_state_dict'])
57
  model.to(device).eval()
58
+ if not production:
59
+ print(f"Mô hình đã sẵn sàng trên {device} (Slow factor: {slow_factor})")
60
 
61
  transform = transforms.Compose([ToTensor(), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
62
 
63
+ if not production:
64
+ print(f"Đang đọc video {video_path}...")
65
  cap = cv2.VideoCapture(video_path)
66
  raw_frames = []
67
  while True:
 
76
 
77
  # Frame Interpolation (Nội suy tuyến tính)
78
  if slow_factor < 1.0:
79
+ if not production:
80
+ print(f"Đang nội suy frame (Slow-mo {slow_factor}x)...")
81
  steps = int(1.0 / slow_factor)
82
  interpolated_full_res = []
83
  for i in range(len(raw_frames) - 1):
 
93
  # Lưu file video slow-motion vật lý (optional)
94
  if not skip_slow_video:
95
  slow_video_path = os.path.join(output_dir, "slow_motion.mp4")
96
+ if not production:
97
+ print(f"Đang ghi file slow motion: {slow_video_path}")
98
 
99
  # Lấy lại FPS gốc (hoặc mặc định 30)
100
  cap = cv2.VideoCapture(video_path)
 
109
  for frame in full_res_frames:
110
  out.write(frame)
111
  out.release()
112
+ if not production:
113
+ print(f"Đã tạo video slow motion: {slow_video_path}")
114
 
115
  else:
116
  full_res_frames = raw_frames
117
 
118
  # Tiền xử lý cho AI
119
+ if not production:
120
+ print("Đang tiền xử lý cho AI...")
121
  images = []
122
  input_size = 160
123
+ for img in tqdm(full_res_frames, disable=production):
124
  h, w = img.shape[:2]
125
  ratio = input_size / max(h, w)
126
  new_size = (int(w * ratio), int(h * ratio))
 
133
  sample = transform({'images': np.asarray(images), 'labels': np.zeros(len(images))})
134
  img_tensor = sample['images'].unsqueeze(0).to(device)
135
 
136
+ if not production:
137
+ print("Đang chạy AI Inference...")
138
  with torch.no_grad():
139
  seq_length, batch, all_logits = 48, 0, []
140
  while batch * seq_length < img_tensor.shape[1]:
 
156
  anchor_class = np.argmax(max_probs) # Class có độ tự tin cao nhất trong 8 sự kiện
157
  anchor_frame = np.argmax(probs_events[:, anchor_class])
158
 
159
+ if not production:
160
+ print(f"Detected Anchor: {labels[anchor_class]} at frame {anchor_frame} (conf: {max_probs[anchor_class]:.2f})")
161
 
162
  events = np.zeros(8, dtype=int)
163
  events[anchor_class] = anchor_frame
 
192
  events[i] = total_frames - 1
193
  current_limit = events[i]
194
 
195
+ if not production:
196
+ print(f"Detected Events (Frames): {events}")
197
 
198
  # Lưu thông tin frame index để visual_report sử dụng
199
  event_metadata = {}
 
214
 
215
  # Return dict nếu cần, hoặc ghi file (legacy)
216
  if return_dict:
217
+ if not production:
218
+ print("Extraction complete (dict mode)")
219
  return {"metadata": metadata}
220
  else:
221
  metadata_path = os.path.join(output_dir, "metadata.json")
222
  with open(metadata_path, 'w') as f:
223
  json.dump(metadata, f)
224
+ if not production:
225
+ print(f"Xong! Ảnh trích xuất lưu tại {phases_dir}")
226
  return metadata
227
 
228
  if __name__ == "__main__":
main.py CHANGED
@@ -40,7 +40,8 @@ def analyze_video_fast(video_path, production=True, output_file=None, output_bas
40
  output_dir=output_dir,
41
  skip_slow_video=True, # Bỏ tạo slow-motion
42
  skip_phase_images=False, # CẦN ảnh phases để analyze.py phân tích
43
- return_dict=True # NEW: Return dict thay vì ghi file
 
44
  )
45
 
46
  if not extraction_result:
@@ -90,7 +91,7 @@ def analyze_video_fast(video_path, production=True, output_file=None, output_bas
90
  from reengineer import reengineer_video
91
  output_video = os.path.join(os.path.dirname(output_file), 'analyzed_video.mp4')
92
  print(f"\nĐang tạo video có overlay...")
93
- reengineer_video(output_file, video_path, output_video)
94
  print(f"Video đã lưu tại: {output_video}")
95
  except Exception as e:
96
  print(f"WARNING: Không thể tạo video overlay: {e}")
 
40
  output_dir=output_dir,
41
  skip_slow_video=True, # Bỏ tạo slow-motion
42
  skip_phase_images=False, # CẦN ảnh phases để analyze.py phân tích
43
+ return_dict=True, # NEW: Return dict thay vì ghi file
44
+ production=production # Tắt logs khi production=True
45
  )
46
 
47
  if not extraction_result:
 
91
  from reengineer import reengineer_video
92
  output_video = os.path.join(os.path.dirname(output_file), 'analyzed_video.mp4')
93
  print(f"\nĐang tạo video có overlay...")
94
+ reengineer_video(output_file, video_path, output_video, production=production)
95
  print(f"Video đã lưu tại: {output_video}")
96
  except Exception as e:
97
  print(f"WARNING: Không thể tạo video overlay: {e}")
reengineer.py CHANGED
@@ -12,8 +12,14 @@ if sys.stdout.encoding != 'utf-8':
12
  try:
13
  sys.stdout.reconfigure(encoding='utf-8')
14
  except AttributeError:
15
- import io
16
- sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
 
 
 
 
 
 
17
 
18
  POSE_CONNECTIONS = [
19
  (11, 12), (11, 13), (13, 15), (12, 14), (14, 16), # Vai - Khuỷu - Cổ tay
@@ -52,7 +58,7 @@ def draw_glass_panel(img, pt1, pt2, color=(0, 0, 0), alpha=0.5):
52
  cv2.rectangle(overlay, pt1, pt2, color, -1)
53
  return cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0)
54
 
55
- def reengineer_video(json_path, video_path, output_path=None):
56
  """
57
  Áp dụng dữ liệu JSON lên video gốc (nguyên kích thước) với overlay cao cấp.
58
  """
@@ -98,7 +104,8 @@ def reengineer_video(json_path, video_path, output_path=None):
98
  else:
99
  temp_output_path = final_output_path
100
 
101
- print(f"--- Đang tạo video Premium: {os.path.basename(video_path)} ({vw}x{vh}) ---")
 
102
 
103
  fourcc = cv2.VideoWriter_fourcc(*'mp4v')
104
  out = cv2.VideoWriter(temp_output_path, fourcc, fps_video, (vw, vh))
@@ -224,7 +231,8 @@ def reengineer_video(json_path, video_path, output_path=None):
224
  os.remove(video_path)
225
  os.rename(temp_output_path, final_output_path)
226
 
227
- print(f"--- HOÀN TẤT! Video lưu tại: {final_output_path} ---")
 
228
 
229
  if __name__ == "__main__":
230
  parser = argparse.ArgumentParser(description="Golf Video Re-engineer Tool")
 
12
  try:
13
  sys.stdout.reconfigure(encoding='utf-8')
14
  except AttributeError:
15
+ pass
16
+
17
+ # Tắt logs từ TensorFlow/MediaPipe trong production
18
+ import os as _os
19
+ _os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Tắt TF logs
20
+ import logging
21
+ logging.getLogger('tensorflow').setLevel(logging.ERROR)
22
+ logging.getLogger('mediapipe').setLevel(logging.ERROR)
23
 
24
  POSE_CONNECTIONS = [
25
  (11, 12), (11, 13), (13, 15), (12, 14), (14, 16), # Vai - Khuỷu - Cổ tay
 
58
  cv2.rectangle(overlay, pt1, pt2, color, -1)
59
  return cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0)
60
 
61
+ def reengineer_video(json_path, video_path, output_path=None, production=False):
62
  """
63
  Áp dụng dữ liệu JSON lên video gốc (nguyên kích thước) với overlay cao cấp.
64
  """
 
104
  else:
105
  temp_output_path = final_output_path
106
 
107
+ if not production:
108
+ print(f"--- Đang tạo video Premium: {os.path.basename(video_path)} ({vw}x{vh}) ---")
109
 
110
  fourcc = cv2.VideoWriter_fourcc(*'mp4v')
111
  out = cv2.VideoWriter(temp_output_path, fourcc, fps_video, (vw, vh))
 
231
  os.remove(video_path)
232
  os.rename(temp_output_path, final_output_path)
233
 
234
+ if not production:
235
+ print(f"--- HOÀN TẤT! Video lưu tại: {final_output_path} ---")
236
 
237
  if __name__ == "__main__":
238
  parser = argparse.ArgumentParser(description="Golf Video Re-engineer Tool")