chenchaoyun commited on
Commit
57a33d4
·
1 Parent(s): 41d3e8a
Files changed (1) hide show
  1. api_routes.py +104 -38
api_routes.py CHANGED
@@ -198,6 +198,19 @@ if CLIP_AVAILABLE:
198
  # 创建线程池执行器用于异步处理CPU密集型任务
199
  executor = ThreadPoolExecutor(max_workers=4)
200
 
 
 
 
 
 
 
 
 
 
 
 
 
 
201
  async def process_cpu_intensive_task(func, *args, **kwargs):
202
  """
203
  异步执行CPU密集型任务
@@ -1192,28 +1205,42 @@ async def analyze_face(
1192
  valid_image_count = 0
1193
 
1194
  try:
1195
- t1 = time.perf_counter()
1196
 
1197
  # 处理每张图片
1198
  for idx, image_data in enumerate(image_data_list):
 
1199
  try:
 
 
1200
  np_arr = np.frombuffer(image_data, np.uint8)
1201
  image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
 
 
 
 
 
1202
 
1203
  if image is None:
1204
  logger.warning(f"无法解析第{idx+1}张图片")
1205
  continue
1206
 
1207
  # 生成MD5哈希
1208
- original_md5_hash = str(uuid.uuid4()).replace('-', '')
1209
  original_image_filename = f"{original_md5_hash}_original.webp"
1210
 
1211
  logger.info(
1212
- f"Processing image {idx+1}/{len(image_data_list)}, md5={original_md5_hash}, size={len(image_data) / 1024:.2f} KB"
1213
  )
1214
 
 
1215
  # 使用指定模型进行分析
1216
  result = analyzer.analyze_faces(image, original_md5_hash, model)
 
 
 
 
 
1217
 
1218
  # 如果该图片没有人脸,跳过
1219
  if not result.get("success") or result.get("face_count", 0) == 0:
@@ -1225,9 +1252,15 @@ async def analyze_face(
1225
 
1226
  if result.get("success") and annotated_image_np is not None:
1227
  original_image_path = os.path.join(OUTPUT_DIR, original_image_filename)
 
1228
  save_success = save_image_force_compress(
1229
  annotated_image_np, original_image_path, max_size_kb=100
1230
  )
 
 
 
 
 
1231
 
1232
  if save_success:
1233
  result["annotated_image_filename"] = original_image_filename
@@ -1240,25 +1273,26 @@ async def analyze_face(
1240
  genders: List[Any] = []
1241
  ages: List[Any] = []
1242
 
1243
- for face_info in faces:
1244
- beauty_value = float(
1245
- face_info.get("beauty_score") or 0.0)
1246
  beauty_scores.append(beauty_value)
1247
- age_models.append(
1248
- face_info.get("age_model_used"))
1249
- gender_models.append(
1250
- face_info.get("gender_model_used"))
1251
  genders.append(face_info.get("gender"))
1252
  ages.append(face_info.get("age"))
1253
 
1254
- cropped_filename = face_info.get(
1255
- "cropped_face_filename")
1256
  if cropped_filename:
1257
- cropped_path = os.path.join(IMAGES_DIR,
1258
- cropped_filename)
1259
  if os.path.exists(cropped_path):
1260
- bos_face = upload_file_to_bos(
1261
- cropped_path)
 
 
 
 
 
 
1262
  await _record_output_file(
1263
  file_path=cropped_path,
1264
  nickname=nickname,
@@ -1269,17 +1303,20 @@ async def analyze_face(
1269
  "source": "analyze",
1270
  "role": "face_crop",
1271
  "model": model.value,
1272
- "face_id": face_info.get(
1273
- "face_id"),
1274
- "gender": face_info.get(
1275
- "gender"),
1276
  "age": face_info.get("age"),
1277
  },
1278
  )
 
 
 
 
 
1279
 
1280
- max_beauty_score = max(
1281
- beauty_scores) if beauty_scores else 0.0
1282
 
 
1283
  await _record_output_file(
1284
  file_path=original_image_path,
1285
  nickname=nickname,
@@ -1291,16 +1328,27 @@ async def analyze_face(
1291
  "model": model.value,
1292
  },
1293
  )
 
 
 
 
 
1294
 
1295
  # 异步执行图片向量化并入库,不阻塞主流程
1296
  if CLIP_AVAILABLE:
1297
  # 先保存原始图片到IMAGES_DIR供向量化使用
1298
- original_input_path = os.path.join(IMAGES_DIR,
1299
- original_image_filename)
1300
  input_save_success = save_image_high_quality(
1301
- image, original_input_path,
1302
- quality=SAVE_QUALITY)
 
 
 
 
 
1303
  if input_save_success:
 
1304
  await _record_output_file(
1305
  file_path=original_input_path,
1306
  nickname=nickname,
@@ -1312,13 +1360,26 @@ async def analyze_face(
1312
  "model": model.value,
1313
  },
1314
  )
 
 
 
 
 
 
1315
  asyncio.create_task(
1316
  handle_image_vector_async(
1317
- original_input_path,
1318
- original_image_filename))
 
 
 
 
 
 
1319
 
 
1320
  logger.info(
1321
- f"<-------- Image {idx+1} processing completed, elapsed: {time.perf_counter() - t1:.3f}s, faces={len(faces)}, beauty={beauty_scores}, age={ages} via {age_models}, gender={genders} via {gender_models} --------"
1322
  )
1323
 
1324
  # 添加到结果列表
@@ -1335,12 +1396,14 @@ async def analyze_face(
1335
  # 如果没有有效图片,返回错误
1336
  if valid_image_count == 0:
1337
  logger.info("<-------- All images processing completed, no faces detected in any image --------")
1338
- return JSONResponse(content={
1339
- "success": False,
1340
- "message": "请尝试上传清晰、无遮挡的正面照片",
1341
- "face_count": 0,
1342
- "faces": []
1343
- })
 
 
1344
 
1345
  # 合并所有结果
1346
  combined_result = {
@@ -1350,11 +1413,11 @@ async def analyze_face(
1350
  "faces": [
1351
  {
1352
  "face": face,
1353
- "annotated_image_filename": result.get("annotated_image_filename")
1354
  }
1355
  for result in all_results
1356
  for face in result["faces"]
1357
- ]
1358
  }
1359
 
1360
  # 保底:对女性年龄进行调整(如果年龄大于阈值且尚未调整)
@@ -1384,7 +1447,10 @@ async def analyze_face(
1384
 
1385
  # 转换所有 numpy 类型为原生 Python 类型
1386
  cleaned_result = convert_numpy_types(combined_result)
1387
- logger.info(f"<-------- All images processing completed, total time: {time.perf_counter() - t1:.3f}s, valid images: {valid_image_count} --------")
 
 
 
1388
  return JSONResponse(content=cleaned_result)
1389
 
1390
  except Exception as e:
 
198
  # 创建线程池执行器用于异步处理CPU密集型任务
199
  executor = ThreadPoolExecutor(max_workers=4)
200
 
201
+
202
+ def _log_stage_duration(stage: str, start_time: float, extra: str | None = None) -> float:
203
+ """
204
+ 统一的耗时日志输出,便于快速定位慢点。
205
+ """
206
+ elapsed = time.perf_counter() - start_time
207
+ if extra:
208
+ logger.info("耗时统计 | %s: %.3fs (%s)", stage, elapsed, extra)
209
+ else:
210
+ logger.info("耗时统计 | %s: %.3fs", stage, elapsed)
211
+ return elapsed
212
+
213
+
214
  async def process_cpu_intensive_task(func, *args, **kwargs):
215
  """
216
  异步执行CPU密集型任务
 
1205
  valid_image_count = 0
1206
 
1207
  try:
1208
+ overall_start = time.perf_counter()
1209
 
1210
  # 处理每张图片
1211
  for idx, image_data in enumerate(image_data_list):
1212
+ image_start = time.perf_counter()
1213
  try:
1214
+ image_size_kb = len(image_data) / 1024 if image_data else 0
1215
+ decode_start = time.perf_counter()
1216
  np_arr = np.frombuffer(image_data, np.uint8)
1217
  image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
1218
+ _log_stage_duration(
1219
+ "图片解码",
1220
+ decode_start,
1221
+ f"image_index={idx+1}, size={image_size_kb:.2f}KB, success={image is not None}",
1222
+ )
1223
 
1224
  if image is None:
1225
  logger.warning(f"无法解析第{idx+1}张图片")
1226
  continue
1227
 
1228
  # 生成MD5哈希
1229
+ original_md5_hash = str(uuid.uuid4()).replace("-", "")
1230
  original_image_filename = f"{original_md5_hash}_original.webp"
1231
 
1232
  logger.info(
1233
+ f"Processing image {idx+1}/{len(image_data_list)}, md5={original_md5_hash}, size={image_size_kb:.2f} KB"
1234
  )
1235
 
1236
+ analysis_start = time.perf_counter()
1237
  # 使用指定模型进行分析
1238
  result = analyzer.analyze_faces(image, original_md5_hash, model)
1239
+ _log_stage_duration(
1240
+ "模型推理",
1241
+ analysis_start,
1242
+ f"image_index={idx+1}, model={model.value}, faces={result.get('face_count', 0)}",
1243
+ )
1244
 
1245
  # 如果该图片没有人脸,跳过
1246
  if not result.get("success") or result.get("face_count", 0) == 0:
 
1252
 
1253
  if result.get("success") and annotated_image_np is not None:
1254
  original_image_path = os.path.join(OUTPUT_DIR, original_image_filename)
1255
+ save_start = time.perf_counter()
1256
  save_success = save_image_force_compress(
1257
  annotated_image_np, original_image_path, max_size_kb=100
1258
  )
1259
+ _log_stage_duration(
1260
+ "标注图保存",
1261
+ save_start,
1262
+ f"image_index={idx+1}, path={original_image_path}, success={save_success}",
1263
+ )
1264
 
1265
  if save_success:
1266
  result["annotated_image_filename"] = original_image_filename
 
1273
  genders: List[Any] = []
1274
  ages: List[Any] = []
1275
 
1276
+ for face_idx, face_info in enumerate(faces, start=1):
1277
+ beauty_value = float(face_info.get("beauty_score") or 0.0)
 
1278
  beauty_scores.append(beauty_value)
1279
+ age_models.append(face_info.get("age_model_used"))
1280
+ gender_models.append(face_info.get("gender_model_used"))
 
 
1281
  genders.append(face_info.get("gender"))
1282
  ages.append(face_info.get("age"))
1283
 
1284
+ cropped_filename = face_info.get("cropped_face_filename")
 
1285
  if cropped_filename:
1286
+ cropped_path = os.path.join(IMAGES_DIR, cropped_filename)
 
1287
  if os.path.exists(cropped_path):
1288
+ upload_start = time.perf_counter()
1289
+ bos_face = upload_file_to_bos(cropped_path)
1290
+ _log_stage_duration(
1291
+ "BOS 上传(人脸)",
1292
+ upload_start,
1293
+ f"image_index={idx+1}, face_index={face_idx}, file={cropped_filename}, uploaded={bos_face}",
1294
+ )
1295
+ record_face_start = time.perf_counter()
1296
  await _record_output_file(
1297
  file_path=cropped_path,
1298
  nickname=nickname,
 
1303
  "source": "analyze",
1304
  "role": "face_crop",
1305
  "model": model.value,
1306
+ "face_id": face_info.get("face_id"),
1307
+ "gender": face_info.get("gender"),
 
 
1308
  "age": face_info.get("age"),
1309
  },
1310
  )
1311
+ _log_stage_duration(
1312
+ "记录人脸文件",
1313
+ record_face_start,
1314
+ f"image_index={idx+1}, face_index={face_idx}, file={cropped_filename}",
1315
+ )
1316
 
1317
+ max_beauty_score = max(beauty_scores) if beauty_scores else 0.0
 
1318
 
1319
+ record_annotated_start = time.perf_counter()
1320
  await _record_output_file(
1321
  file_path=original_image_path,
1322
  nickname=nickname,
 
1328
  "model": model.value,
1329
  },
1330
  )
1331
+ _log_stage_duration(
1332
+ "记录标注文件",
1333
+ record_annotated_start,
1334
+ f"image_index={idx+1}, file={original_image_filename}",
1335
+ )
1336
 
1337
  # 异步执行图片向量化并入库,不阻塞主流程
1338
  if CLIP_AVAILABLE:
1339
  # 先保存原始图片到IMAGES_DIR供向量化使用
1340
+ original_input_path = os.path.join(IMAGES_DIR, original_image_filename)
1341
+ save_input_start = time.perf_counter()
1342
  input_save_success = save_image_high_quality(
1343
+ image, original_input_path, quality=SAVE_QUALITY
1344
+ )
1345
+ _log_stage_duration(
1346
+ "原图保存(CLIP)",
1347
+ save_input_start,
1348
+ f"image_index={idx+1}, success={input_save_success}",
1349
+ )
1350
  if input_save_success:
1351
+ record_input_start = time.perf_counter()
1352
  await _record_output_file(
1353
  file_path=original_input_path,
1354
  nickname=nickname,
 
1360
  "model": model.value,
1361
  },
1362
  )
1363
+ _log_stage_duration(
1364
+ "记录原图文件",
1365
+ record_input_start,
1366
+ f"image_index={idx+1}, file={original_image_filename}",
1367
+ )
1368
+ vector_schedule_start = time.perf_counter()
1369
  asyncio.create_task(
1370
  handle_image_vector_async(
1371
+ original_input_path, original_image_filename
1372
+ )
1373
+ )
1374
+ _log_stage_duration(
1375
+ "调度向量化任务",
1376
+ vector_schedule_start,
1377
+ f"image_index={idx+1}, file={original_image_filename}",
1378
+ )
1379
 
1380
+ image_elapsed = time.perf_counter() - image_start
1381
  logger.info(
1382
+ f"<-------- Image {idx+1} processing completed, elapsed: {image_elapsed:.3f}s, faces={len(faces)}, beauty={beauty_scores}, age={ages} via {age_models}, gender={genders} via {gender_models} --------"
1383
  )
1384
 
1385
  # 添加到结果列表
 
1396
  # 如果没有有效图片,返回错误
1397
  if valid_image_count == 0:
1398
  logger.info("<-------- All images processing completed, no faces detected in any image --------")
1399
+ return JSONResponse(
1400
+ content={
1401
+ "success": False,
1402
+ "message": "请尝试上传清晰、无遮挡的正面照片",
1403
+ "face_count": 0,
1404
+ "faces": [],
1405
+ }
1406
+ )
1407
 
1408
  # 合并所有结果
1409
  combined_result = {
 
1413
  "faces": [
1414
  {
1415
  "face": face,
1416
+ "annotated_image_filename": result.get("annotated_image_filename"),
1417
  }
1418
  for result in all_results
1419
  for face in result["faces"]
1420
+ ],
1421
  }
1422
 
1423
  # 保底:对女性年龄进行调整(如果年龄大于阈值且尚未调整)
 
1447
 
1448
  # 转换所有 numpy 类型为原生 Python 类型
1449
  cleaned_result = convert_numpy_types(combined_result)
1450
+ total_elapsed = time.perf_counter() - overall_start
1451
+ logger.info(
1452
+ f"<-------- All images processing completed, total time: {total_elapsed:.3f}s, valid images: {valid_image_count} --------"
1453
+ )
1454
  return JSONResponse(content=cleaned_result)
1455
 
1456
  except Exception as e: