chawin.chen committed on
Commit ·
3d725b3
1
Parent(s): efa793f
feat: implement date-based subdirectory storage with 'picture/' prefix for all image outputs
Browse files- api_routes.py +59 -36
- face_analyzer.py +3 -2
- utils.py +27 -4
api_routes.py
CHANGED
|
@@ -329,6 +329,7 @@ from utils import (
|
|
| 329 |
compress_image_by_file_size,
|
| 330 |
convert_image_format,
|
| 331 |
upload_file_to_bos,
|
|
|
|
| 332 |
ensure_bos_resources,
|
| 333 |
download_bos_directory,
|
| 334 |
)
|
|
@@ -740,10 +741,13 @@ async def _record_output_file(
|
|
| 740 |
score=score_value,
|
| 741 |
extra_metadata=extra,
|
| 742 |
)
|
|
|
|
|
|
|
|
|
|
| 743 |
duration = time.perf_counter() - start_time
|
| 744 |
logger.info(
|
| 745 |
"MySQL记录完成 file=%s category=%s nickname=%s score=%.4f bos_uploaded=%s cost=%.3fs",
|
| 746 |
-
|
| 747 |
category or "auto",
|
| 748 |
nickname or "",
|
| 749 |
score_value,
|
|
@@ -756,6 +760,17 @@ async def _record_output_file(
|
|
| 756 |
asyncio.create_task(_write_record())
|
| 757 |
|
| 758 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 759 |
async def _refresh_celebrity_cache(sample_image_path: str,
|
| 760 |
db_path: str) -> None:
|
| 761 |
"""刷新DeepFace数据库缓存"""
|
|
@@ -1009,8 +1024,8 @@ async def upload_file(
|
|
| 1009 |
logger.warning(
|
| 1010 |
"GFPGAN 修复器不可用,跳过修复,按原样保存证件照")
|
| 1011 |
# 按原样保存
|
| 1012 |
-
|
| 1013 |
-
saved_path =
|
| 1014 |
with open(saved_path, "wb") as f:
|
| 1015 |
f.write(contents)
|
| 1016 |
# bos_uploaded = upload_file_to_bos(saved_path)
|
|
@@ -1022,8 +1037,8 @@ async def upload_file(
|
|
| 1022 |
restored_image = await process_cpu_intensive_task(
|
| 1023 |
photo_restorer.restore_image, image)
|
| 1024 |
# 以 webp 高质量保存,命名与证件照区分
|
| 1025 |
-
|
| 1026 |
-
saved_path =
|
| 1027 |
if not save_image_high_quality(restored_image, saved_path,
|
| 1028 |
quality=SAVE_QUALITY):
|
| 1029 |
raise HTTPException(status_code=500,
|
|
@@ -1058,8 +1073,8 @@ async def upload_file(
|
|
| 1058 |
except Exception as e:
|
| 1059 |
logger.error(f"证件照上传修复流程失败,改为直接保存: {e}")
|
| 1060 |
# 失败兜底:直接保存原文件
|
| 1061 |
-
|
| 1062 |
-
saved_path =
|
| 1063 |
try:
|
| 1064 |
with open(saved_path, "wb") as f:
|
| 1065 |
f.write(contents)
|
|
@@ -1084,8 +1099,8 @@ async def upload_file(
|
|
| 1084 |
}
|
| 1085 |
|
| 1086 |
# 默认:普通文件直接保存原始内容
|
| 1087 |
-
|
| 1088 |
-
saved_path =
|
| 1089 |
try:
|
| 1090 |
with open(saved_path, "wb") as f:
|
| 1091 |
f.write(contents)
|
|
@@ -1123,8 +1138,8 @@ async def analyze_face(
|
|
| 1123 |
np_arr = np.frombuffer(contents, np.uint8)
|
| 1124 |
image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
|
| 1125 |
original_md5_hash = f"ugc_{str(uuid.uuid4()).replace('-', '')}"
|
| 1126 |
-
|
| 1127 |
-
original_image_path =
|
| 1128 |
save_image_high_quality(image, original_image_path, quality=SAVE_QUALITY, upload_to_bos=False)
|
| 1129 |
try:
|
| 1130 |
with open(original_image_path, "rb") as f:
|
|
@@ -1308,7 +1323,9 @@ async def analyze_face(
|
|
| 1308 |
|
| 1309 |
# 生成MD5哈希
|
| 1310 |
original_md5_hash = f"ugc_{str(uuid.uuid4()).replace('-', '')}"
|
| 1311 |
-
|
|
|
|
|
|
|
| 1312 |
|
| 1313 |
logger.info(
|
| 1314 |
f"Processing image {idx+1}/{len(image_data_list)}, md5={original_md5_hash}, size={image_size_kb:.2f} KB"
|
|
@@ -1332,7 +1349,7 @@ async def analyze_face(
|
|
| 1332 |
result["annotated_image_filename"] = None
|
| 1333 |
|
| 1334 |
if result.get("success") and annotated_image_np is not None:
|
| 1335 |
-
|
| 1336 |
save_start = time.perf_counter()
|
| 1337 |
save_success = save_image_high_quality(
|
| 1338 |
annotated_image_np, original_image_path, quality=SAVE_QUALITY
|
|
@@ -2260,7 +2277,7 @@ async def restore_old_photo(
|
|
| 2260 |
processed_height, processed_width = final_image.shape[:2]
|
| 2261 |
|
| 2262 |
# 保存最终处理后的图像到IMAGES_DIR(与人脸评分使用相同路径)
|
| 2263 |
-
restored_path =
|
| 2264 |
save_success = save_image_high_quality(
|
| 2265 |
final_image, restored_path, quality=SAVE_QUALITY
|
| 2266 |
)
|
|
@@ -2370,7 +2387,7 @@ async def colorize_photo(
|
|
| 2370 |
processed_height, processed_width = colorized_image.shape[:2]
|
| 2371 |
|
| 2372 |
# 保存上色后的图像到IMAGES_DIR
|
| 2373 |
-
colored_path =
|
| 2374 |
save_success = save_image_high_quality(
|
| 2375 |
colorized_image, colored_path, quality=SAVE_QUALITY
|
| 2376 |
)
|
|
@@ -2556,8 +2573,8 @@ async def anime_stylize_photo(
|
|
| 2556 |
bos_uploaded_flag = upload_file_to_bos(output_path)
|
| 2557 |
return True, bos_uploaded_flag
|
| 2558 |
|
| 2559 |
-
|
| 2560 |
-
original_path =
|
| 2561 |
if not os.path.exists(original_path):
|
| 2562 |
original_saved, original_bos_uploaded = _save_webp_and_upload(
|
| 2563 |
image, original_path, "动漫风格原图"
|
|
@@ -2570,7 +2587,8 @@ async def anime_stylize_photo(
|
|
| 2570 |
original_bos_uploaded = False
|
| 2571 |
|
| 2572 |
styled_uuid = f"ugc_{uuid.uuid4().hex}"
|
| 2573 |
-
|
|
|
|
| 2574 |
|
| 2575 |
# 获取风格描述
|
| 2576 |
style_descriptions = anime_stylizer.get_available_styles()
|
|
@@ -2604,7 +2622,7 @@ async def anime_stylize_photo(
|
|
| 2604 |
raise HTTPException(status_code=500, detail=f"动漫风格化处理失败: {str(e)}")
|
| 2605 |
|
| 2606 |
# 保存风格化后的图像到IMAGES_DIR
|
| 2607 |
-
styled_path
|
| 2608 |
save_success, bos_uploaded = _save_webp_and_upload(
|
| 2609 |
stylized_image, styled_path, "动漫风格结果图"
|
| 2610 |
)
|
|
@@ -2699,7 +2717,7 @@ async def grayscale_photo(
|
|
| 2699 |
raise HTTPException(status_code=500, detail=f"黑白化处理失败: {str(e)}")
|
| 2700 |
|
| 2701 |
# 保存黑白化后的图像到IMAGES_DIR
|
| 2702 |
-
grayscale_path =
|
| 2703 |
save_success = save_image_high_quality(
|
| 2704 |
grayscale_image, grayscale_path, quality=SAVE_QUALITY
|
| 2705 |
)
|
|
@@ -2816,7 +2834,7 @@ async def upscale_photo(
|
|
| 2816 |
upscaled_height, upscaled_width = upscaled_image.shape[:2]
|
| 2817 |
|
| 2818 |
# 保存超清后的图像到IMAGES_DIR(与其他接口保持一致)
|
| 2819 |
-
upscaled_path =
|
| 2820 |
save_success = save_image_high_quality(
|
| 2821 |
upscaled_image, upscaled_path, quality=SAVE_QUALITY
|
| 2822 |
)
|
|
@@ -3001,7 +3019,7 @@ async def remove_background(
|
|
| 3001 |
processed_height, processed_width = processed_image.shape[:2]
|
| 3002 |
|
| 3003 |
# 保存抠图后的图像到IMAGES_DIR(与facescore保持一致)
|
| 3004 |
-
processed_path =
|
| 3005 |
bos_uploaded = False
|
| 3006 |
|
| 3007 |
# 根据是否有透明背景选择保存方式
|
|
@@ -3179,7 +3197,7 @@ async def rvm_remove_background(
|
|
| 3179 |
processed_height, processed_width = processed_image.shape[:2]
|
| 3180 |
|
| 3181 |
# 保存抠图后的图像到IMAGES_DIR(与facescore保持一致)
|
| 3182 |
-
processed_path =
|
| 3183 |
bos_uploaded = False
|
| 3184 |
|
| 3185 |
# 根据是否有透明背景选择保存方式
|
|
@@ -3437,8 +3455,8 @@ async def split_grid_image(
|
|
| 3437 |
|
| 3438 |
# 生成格子文件名
|
| 3439 |
grid_index = row * cols + col + 1 # 从1开始编号
|
| 3440 |
-
|
| 3441 |
-
grid_path =
|
| 3442 |
|
| 3443 |
# 保存格子图片
|
| 3444 |
save_success = save_image_high_quality(grid_image, grid_path, quality=SAVE_QUALITY)
|
|
@@ -3460,8 +3478,8 @@ async def split_grid_image(
|
|
| 3460 |
)
|
| 3461 |
|
| 3462 |
# 同时保存原图到IMAGES_DIR供向量化使用
|
| 3463 |
-
|
| 3464 |
-
original_path =
|
| 3465 |
if save_image_high_quality(image, original_path, quality=SAVE_QUALITY):
|
| 3466 |
await _record_output_file(
|
| 3467 |
file_path=original_path,
|
|
@@ -3597,16 +3615,18 @@ async def compress_image(
|
|
| 3597 |
raise HTTPException(status_code=500, detail=f"压缩处理失败: {str(e)}")
|
| 3598 |
|
| 3599 |
# 保存压缩后的图像到IMAGES_DIR
|
| 3600 |
-
compressed_path =
|
| 3601 |
try:
|
| 3602 |
with open(compressed_path, "wb") as f:
|
| 3603 |
f.write(compressed_bytes)
|
|
|
|
| 3604 |
bos_uploaded = upload_file_to_bos(compressed_path)
|
| 3605 |
logger.info(f"Compressed image saved successfully: {compressed_path}")
|
| 3606 |
|
| 3607 |
# 异步执行图片向量化并入库,不阻塞主流程
|
| 3608 |
if CLIP_AVAILABLE:
|
| 3609 |
asyncio.create_task(handle_image_vector_async(compressed_path, compressed_filename))
|
|
|
|
| 3610 |
await _record_output_file(
|
| 3611 |
file_path=compressed_path,
|
| 3612 |
nickname=nickname,
|
|
@@ -4429,10 +4449,15 @@ async def face_similarity_verification(
|
|
| 4429 |
md5_hash2 = f"ugc_{str(uuid.uuid4()).replace('-', '')}"
|
| 4430 |
|
| 4431 |
# 生成文件名
|
| 4432 |
-
|
| 4433 |
-
|
| 4434 |
-
|
| 4435 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4436 |
|
| 4437 |
logger.info(f"Starting face similarity verification: {file1.filename} vs {file2.filename}")
|
| 4438 |
t1 = time.perf_counter()
|
|
@@ -4466,7 +4491,7 @@ async def face_similarity_verification(
|
|
| 4466 |
raise HTTPException(status_code=400, detail="第二张图片中未检测到人脸,请上传包含清晰人脸的图片")
|
| 4467 |
|
| 4468 |
# 保存原始图片到IMAGES_DIR(先不上传 BOS,供 DeepFace 使用)
|
| 4469 |
-
original_path1
|
| 4470 |
if not save_image_high_quality(
|
| 4471 |
image1,
|
| 4472 |
original_path1,
|
|
@@ -4475,7 +4500,6 @@ async def face_similarity_verification(
|
|
| 4475 |
):
|
| 4476 |
raise HTTPException(status_code=500, detail="保存第一张原始图片失败")
|
| 4477 |
|
| 4478 |
-
original_path2 = os.path.join(IMAGES_DIR, original_filename2)
|
| 4479 |
if not save_image_high_quality(
|
| 4480 |
image2,
|
| 4481 |
original_path2,
|
|
@@ -4631,8 +4655,7 @@ async def face_similarity_verification(
|
|
| 4631 |
face_img1 = image1[y1:y1_end, x1:x1_end]
|
| 4632 |
face_img2 = image2[y2:y2_end, x2:x2_end]
|
| 4633 |
|
| 4634 |
-
face_path1
|
| 4635 |
-
face_path2 = os.path.join(IMAGES_DIR, face_filename2)
|
| 4636 |
|
| 4637 |
# 直接保存裁剪图,不进行特征点绘制
|
| 4638 |
if save_image_high_quality(face_img1, face_path1, quality=SAVE_QUALITY):
|
|
|
|
| 329 |
compress_image_by_file_size,
|
| 330 |
convert_image_format,
|
| 331 |
upload_file_to_bos,
|
| 332 |
+
get_date_subfolder,
|
| 333 |
ensure_bos_resources,
|
| 334 |
download_bos_directory,
|
| 335 |
)
|
|
|
|
| 741 |
score=score_value,
|
| 742 |
extra_metadata=extra,
|
| 743 |
)
|
| 744 |
+
# 使用与数据库一致的路径转换
|
| 745 |
+
from database import _normalize_file_path
|
| 746 |
+
display_path = _normalize_file_path(file_path) or os.path.basename(file_path)
|
| 747 |
duration = time.perf_counter() - start_time
|
| 748 |
logger.info(
|
| 749 |
"MySQL记录完成 file=%s category=%s nickname=%s score=%.4f bos_uploaded=%s cost=%.3fs",
|
| 750 |
+
display_path,
|
| 751 |
category or "auto",
|
| 752 |
nickname or "",
|
| 753 |
score_value,
|
|
|
|
| 760 |
asyncio.create_task(_write_record())
|
| 761 |
|
| 762 |
|
| 763 |
+
def _get_save_path(filename: str, base_dir: str = IMAGES_DIR) -> Tuple[str, str]:
|
| 764 |
+
"""
|
| 765 |
+
返回完整的保存路径和相对于 base_dir 的规范化路径(带日期前缀)。
|
| 766 |
+
:return: (abs_path, rel_path)
|
| 767 |
+
"""
|
| 768 |
+
date_sub = get_date_subfolder(base_dir)
|
| 769 |
+
abs_path = os.path.join(base_dir, date_sub, filename)
|
| 770 |
+
rel_path = f"{date_sub}/{filename}"
|
| 771 |
+
return abs_path, rel_path
|
| 772 |
+
|
| 773 |
+
|
| 774 |
async def _refresh_celebrity_cache(sample_image_path: str,
|
| 775 |
db_path: str) -> None:
|
| 776 |
"""刷新DeepFace数据库缓存"""
|
|
|
|
| 1024 |
logger.warning(
|
| 1025 |
"GFPGAN 修复器不可用,跳过修复,按原样保存证件照")
|
| 1026 |
# 按原样保存
|
| 1027 |
+
filename_raw = f"{unique_id}_save_id_photo{file_extension}"
|
| 1028 |
+
saved_path, saved_filename = _get_save_path(filename_raw)
|
| 1029 |
with open(saved_path, "wb") as f:
|
| 1030 |
f.write(contents)
|
| 1031 |
# bos_uploaded = upload_file_to_bos(saved_path)
|
|
|
|
| 1037 |
restored_image = await process_cpu_intensive_task(
|
| 1038 |
photo_restorer.restore_image, image)
|
| 1039 |
# 以 webp 高质量保存,命名与证件照区分
|
| 1040 |
+
filename_raw = f"{unique_id}_save_id_photo_restore.webp"
|
| 1041 |
+
saved_path, saved_filename = _get_save_path(filename_raw)
|
| 1042 |
if not save_image_high_quality(restored_image, saved_path,
|
| 1043 |
quality=SAVE_QUALITY):
|
| 1044 |
raise HTTPException(status_code=500,
|
|
|
|
| 1073 |
except Exception as e:
|
| 1074 |
logger.error(f"证件照上传修复流程失败,改为直接保存: {e}")
|
| 1075 |
# 失败兜底:直接保存原文件
|
| 1076 |
+
filename_raw = f"{unique_id}_save_id_photo{file_extension}"
|
| 1077 |
+
saved_path, saved_filename = _get_save_path(filename_raw)
|
| 1078 |
try:
|
| 1079 |
with open(saved_path, "wb") as f:
|
| 1080 |
f.write(contents)
|
|
|
|
| 1099 |
}
|
| 1100 |
|
| 1101 |
# 默认:普通文件直接保存原始内容
|
| 1102 |
+
filename_raw = f"{unique_id}_save_file{file_extension}"
|
| 1103 |
+
saved_path, saved_filename = _get_save_path(filename_raw)
|
| 1104 |
try:
|
| 1105 |
with open(saved_path, "wb") as f:
|
| 1106 |
f.write(contents)
|
|
|
|
| 1138 |
np_arr = np.frombuffer(contents, np.uint8)
|
| 1139 |
image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
|
| 1140 |
original_md5_hash = f"ugc_{str(uuid.uuid4()).replace('-', '')}"
|
| 1141 |
+
original_image_filename_raw = f"{original_md5_hash}_original.webp"
|
| 1142 |
+
original_image_path, original_image_filename = _get_save_path(original_image_filename_raw)
|
| 1143 |
save_image_high_quality(image, original_image_path, quality=SAVE_QUALITY, upload_to_bos=False)
|
| 1144 |
try:
|
| 1145 |
with open(original_image_path, "rb") as f:
|
|
|
|
| 1323 |
|
| 1324 |
# 生成MD5哈希
|
| 1325 |
original_md5_hash = f"ugc_{str(uuid.uuid4()).replace('-', '')}"
|
| 1326 |
+
original_image_filename_raw = f"{original_md5_hash}_original.webp"
|
| 1327 |
+
# 获取带日期的保存路径和带日期的相对路径
|
| 1328 |
+
original_image_path, original_image_filename = _get_save_path(original_image_filename_raw, OUTPUT_DIR)
|
| 1329 |
|
| 1330 |
logger.info(
|
| 1331 |
f"Processing image {idx+1}/{len(image_data_list)}, md5={original_md5_hash}, size={image_size_kb:.2f} KB"
|
|
|
|
| 1349 |
result["annotated_image_filename"] = None
|
| 1350 |
|
| 1351 |
if result.get("success") and annotated_image_np is not None:
|
| 1352 |
+
# 注意:这里已经使用之前生成的 original_image_path
|
| 1353 |
save_start = time.perf_counter()
|
| 1354 |
save_success = save_image_high_quality(
|
| 1355 |
annotated_image_np, original_image_path, quality=SAVE_QUALITY
|
|
|
|
| 2277 |
processed_height, processed_width = final_image.shape[:2]
|
| 2278 |
|
| 2279 |
# 保存最终处理后的图像到IMAGES_DIR(与人脸评分使用相同路径)
|
| 2280 |
+
restored_path, restored_filename = _get_save_path(restored_filename)
|
| 2281 |
save_success = save_image_high_quality(
|
| 2282 |
final_image, restored_path, quality=SAVE_QUALITY
|
| 2283 |
)
|
|
|
|
| 2387 |
processed_height, processed_width = colorized_image.shape[:2]
|
| 2388 |
|
| 2389 |
# 保存上色后的图像到IMAGES_DIR
|
| 2390 |
+
colored_path, colored_filename = _get_save_path(colored_filename)
|
| 2391 |
save_success = save_image_high_quality(
|
| 2392 |
colorized_image, colored_path, quality=SAVE_QUALITY
|
| 2393 |
)
|
|
|
|
| 2573 |
bos_uploaded_flag = upload_file_to_bos(output_path)
|
| 2574 |
return True, bos_uploaded_flag
|
| 2575 |
|
| 2576 |
+
original_filename_raw = f"{original_md5_hash}_anime_style.webp"
|
| 2577 |
+
original_path, original_filename = _get_save_path(original_filename_raw)
|
| 2578 |
if not os.path.exists(original_path):
|
| 2579 |
original_saved, original_bos_uploaded = _save_webp_and_upload(
|
| 2580 |
image, original_path, "动漫风格原图"
|
|
|
|
| 2587 |
original_bos_uploaded = False
|
| 2588 |
|
| 2589 |
styled_uuid = f"ugc_{uuid.uuid4().hex}"
|
| 2590 |
+
styled_filename_raw = f"{styled_uuid}_anime_style_{style_type}.webp"
|
| 2591 |
+
styled_path, styled_filename = _get_save_path(styled_filename_raw)
|
| 2592 |
|
| 2593 |
# 获取风格描述
|
| 2594 |
style_descriptions = anime_stylizer.get_available_styles()
|
|
|
|
| 2622 |
raise HTTPException(status_code=500, detail=f"动漫风格化处理失败: {str(e)}")
|
| 2623 |
|
| 2624 |
# 保存风格化后的图像到IMAGES_DIR
|
| 2625 |
+
# styled_path 已经在前面通过 _get_save_path 生成
|
| 2626 |
save_success, bos_uploaded = _save_webp_and_upload(
|
| 2627 |
stylized_image, styled_path, "动漫风格结果图"
|
| 2628 |
)
|
|
|
|
| 2717 |
raise HTTPException(status_code=500, detail=f"黑白化处理失败: {str(e)}")
|
| 2718 |
|
| 2719 |
# 保存黑白化后的图像到IMAGES_DIR
|
| 2720 |
+
grayscale_path, grayscale_filename = _get_save_path(grayscale_filename)
|
| 2721 |
save_success = save_image_high_quality(
|
| 2722 |
grayscale_image, grayscale_path, quality=SAVE_QUALITY
|
| 2723 |
)
|
|
|
|
| 2834 |
upscaled_height, upscaled_width = upscaled_image.shape[:2]
|
| 2835 |
|
| 2836 |
# 保存超清后的图像到IMAGES_DIR(与其他接口保持一致)
|
| 2837 |
+
upscaled_path, upscaled_filename = _get_save_path(upscaled_filename)
|
| 2838 |
save_success = save_image_high_quality(
|
| 2839 |
upscaled_image, upscaled_path, quality=SAVE_QUALITY
|
| 2840 |
)
|
|
|
|
| 3019 |
processed_height, processed_width = processed_image.shape[:2]
|
| 3020 |
|
| 3021 |
# 保存抠图后的图像到IMAGES_DIR(与facescore保持一致)
|
| 3022 |
+
processed_path, processed_filename = _get_save_path(processed_filename)
|
| 3023 |
bos_uploaded = False
|
| 3024 |
|
| 3025 |
# 根据是否有透明背景选择保存方式
|
|
|
|
| 3197 |
processed_height, processed_width = processed_image.shape[:2]
|
| 3198 |
|
| 3199 |
# 保存抠图后的图像到IMAGES_DIR(与facescore保持一致)
|
| 3200 |
+
processed_path, processed_filename = _get_save_path(processed_filename)
|
| 3201 |
bos_uploaded = False
|
| 3202 |
|
| 3203 |
# 根据是否有透明背景选择保存方式
|
|
|
|
| 3455 |
|
| 3456 |
# 生成格子文件名
|
| 3457 |
grid_index = row * cols + col + 1 # 从1开始编号
|
| 3458 |
+
grid_filename_raw = f"{original_md5_hash}_grid_{grid_name}_{grid_index:02d}.webp"
|
| 3459 |
+
grid_path, grid_filename = _get_save_path(grid_filename_raw)
|
| 3460 |
|
| 3461 |
# 保存格子图片
|
| 3462 |
save_success = save_image_high_quality(grid_image, grid_path, quality=SAVE_QUALITY)
|
|
|
|
| 3478 |
)
|
| 3479 |
|
| 3480 |
# 同时保存原图到IMAGES_DIR供向量化使用
|
| 3481 |
+
original_filename_raw = f"{original_md5_hash}_original.webp"
|
| 3482 |
+
original_path, original_filename = _get_save_path(original_filename_raw)
|
| 3483 |
if save_image_high_quality(image, original_path, quality=SAVE_QUALITY):
|
| 3484 |
await _record_output_file(
|
| 3485 |
file_path=original_path,
|
|
|
|
| 3615 |
raise HTTPException(status_code=500, detail=f"压缩处理失败: {str(e)}")
|
| 3616 |
|
| 3617 |
# 保存压缩后的图像到IMAGES_DIR
|
| 3618 |
+
compressed_path, compressed_filename = _get_save_path(compressed_filename)
|
| 3619 |
try:
|
| 3620 |
with open(compressed_path, "wb") as f:
|
| 3621 |
f.write(compressed_bytes)
|
| 3622 |
+
save_success = True
|
| 3623 |
bos_uploaded = upload_file_to_bos(compressed_path)
|
| 3624 |
logger.info(f"Compressed image saved successfully: {compressed_path}")
|
| 3625 |
|
| 3626 |
# 异步执行图片向量化并入库,不阻塞主流程
|
| 3627 |
if CLIP_AVAILABLE:
|
| 3628 |
asyncio.create_task(handle_image_vector_async(compressed_path, compressed_filename))
|
| 3629 |
+
|
| 3630 |
await _record_output_file(
|
| 3631 |
file_path=compressed_path,
|
| 3632 |
nickname=nickname,
|
|
|
|
| 4449 |
md5_hash2 = f"ugc_{str(uuid.uuid4()).replace('-', '')}"
|
| 4450 |
|
| 4451 |
# 生成文件名
|
| 4452 |
+
original_filename1_raw = f"{md5_hash1}_original1.webp"
|
| 4453 |
+
original_filename2_raw = f"{md5_hash2}_original2.webp"
|
| 4454 |
+
face_filename1_raw = f"{md5_hash1}_face1.webp"
|
| 4455 |
+
face_filename2_raw = f"{md5_hash2}_face2.webp"
|
| 4456 |
+
|
| 4457 |
+
original_path1, original_filename1 = _get_save_path(original_filename1_raw)
|
| 4458 |
+
original_path2, original_filename2 = _get_save_path(original_filename2_raw)
|
| 4459 |
+
face_path1, face_filename1 = _get_save_path(face_filename1_raw)
|
| 4460 |
+
face_path2, face_filename2 = _get_save_path(face_filename2_raw)
|
| 4461 |
|
| 4462 |
logger.info(f"Starting face similarity verification: {file1.filename} vs {file2.filename}")
|
| 4463 |
t1 = time.perf_counter()
|
|
|
|
| 4491 |
raise HTTPException(status_code=400, detail="第二张图片中未检测到人脸,请上传包含清晰人脸的图片")
|
| 4492 |
|
| 4493 |
# 保存原始图片到IMAGES_DIR(先不上传 BOS,供 DeepFace 使用)
|
| 4494 |
+
# original_path1 和 original_path2 已经在前面通过 _get_save_path 生成
|
| 4495 |
if not save_image_high_quality(
|
| 4496 |
image1,
|
| 4497 |
original_path1,
|
|
|
|
| 4500 |
):
|
| 4501 |
raise HTTPException(status_code=500, detail="保存第一张原始图片失败")
|
| 4502 |
|
|
|
|
| 4503 |
if not save_image_high_quality(
|
| 4504 |
image2,
|
| 4505 |
original_path2,
|
|
|
|
| 4655 |
face_img1 = image1[y1:y1_end, x1:x1_end]
|
| 4656 |
face_img2 = image2[y2:y2_end, x2:x2_end]
|
| 4657 |
|
| 4658 |
+
# face_path1 和 face_path2 已经通过 _get_save_path 生成
|
|
|
|
| 4659 |
|
| 4660 |
# 直接保存裁剪图,不进行特征点绘制
|
| 4661 |
if save_image_high_quality(face_img1, face_path1, quality=SAVE_QUALITY):
|
face_analyzer.py
CHANGED
|
@@ -11,7 +11,7 @@ from config import logger, MODELS_PATH, OUTPUT_DIR, DEEPFACE_AVAILABLE, \
|
|
| 11 |
YOLO_AVAILABLE
|
| 12 |
from facial_analyzer import FacialFeatureAnalyzer
|
| 13 |
from models import ModelType
|
| 14 |
-
from utils import save_image_high_quality
|
| 15 |
|
| 16 |
if DEEPFACE_AVAILABLE:
|
| 17 |
from deepface import DeepFace
|
|
@@ -981,7 +981,8 @@ class EnhancedFaceAnalyzer:
|
|
| 981 |
pass
|
| 982 |
|
| 983 |
# 保存裁剪的人脸
|
| 984 |
-
|
|
|
|
| 985 |
cropped_face_path = os.path.join(OUTPUT_DIR, cropped_face_filename)
|
| 986 |
try:
|
| 987 |
save_image_high_quality(face_cropped, cropped_face_path)
|
|
|
|
| 11 |
YOLO_AVAILABLE
|
| 12 |
from facial_analyzer import FacialFeatureAnalyzer
|
| 13 |
from models import ModelType
|
| 14 |
+
from utils import save_image_high_quality, get_date_subfolder
|
| 15 |
|
| 16 |
if DEEPFACE_AVAILABLE:
|
| 17 |
from deepface import DeepFace
|
|
|
|
| 981 |
pass
|
| 982 |
|
| 983 |
# 保存裁剪的人脸
|
| 984 |
+
date_sub = get_date_subfolder(OUTPUT_DIR)
|
| 985 |
+
cropped_face_filename = f"{date_sub}/{original_image_hash}_face_{i + 1}.webp"
|
| 986 |
cropped_face_path = os.path.join(OUTPUT_DIR, cropped_face_filename)
|
| 987 |
try:
|
| 988 |
save_image_high_quality(face_cropped, cropped_face_path)
|
utils.py
CHANGED
|
@@ -6,6 +6,7 @@ import re
|
|
| 6 |
import shutil
|
| 7 |
import threading
|
| 8 |
import time
|
|
|
|
| 9 |
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 10 |
from typing import Optional
|
| 11 |
from collections import OrderedDict
|
|
@@ -480,13 +481,20 @@ def upload_file_to_bos(file_path: str, object_name: str | None = None) -> bool:
|
|
| 480 |
if object_name:
|
| 481 |
object_key = object_name.strip("/ ")
|
| 482 |
else:
|
| 483 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 484 |
if BOS_IMAGE_DIR:
|
| 485 |
object_key = "/".join(
|
| 486 |
-
part.strip("/ ") for part in (BOS_IMAGE_DIR,
|
| 487 |
)
|
| 488 |
-
else:
|
| 489 |
-
object_key = base_name
|
| 490 |
|
| 491 |
mtime_ns = getattr(file_stat, "st_mtime_ns", int(file_stat.st_mtime * 1_000_000_000))
|
| 492 |
cache_signature = (mtime_ns, file_stat.st_size)
|
|
@@ -602,6 +610,21 @@ def image_to_base64(image: np.ndarray) -> str:
|
|
| 602 |
return f"data:image/webp;base64,{img_base64}"
|
| 603 |
|
| 604 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 605 |
def save_base64_to_unique_file(
|
| 606 |
base64_string: str, output_dir: str = "output_images"
|
| 607 |
) -> str | None:
|
|
|
|
| 6 |
import shutil
|
| 7 |
import threading
|
| 8 |
import time
|
| 9 |
+
from datetime import datetime
|
| 10 |
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 11 |
from typing import Optional
|
| 12 |
from collections import OrderedDict
|
|
|
|
| 481 |
if object_name:
|
| 482 |
object_key = object_name.strip("/ ")
|
| 483 |
else:
|
| 484 |
+
# 如果文件在 _IMAGES_DIR_ABS 之下,保持其相对路径作为 object_key,包含可能的子目录(如日期目录)
|
| 485 |
+
try:
|
| 486 |
+
# 这里的 _IMAGES_DIR_ABS 已经提前在 utils.py 定义了
|
| 487 |
+
rel_path = os.path.relpath(expanded_path, _IMAGES_DIR_ABS)
|
| 488 |
+
# 路径分隔符统一使用正斜杠,防止 Windows 兼容性问题
|
| 489 |
+
object_key = rel_path.replace(os.sep, "/")
|
| 490 |
+
except Exception:
|
| 491 |
+
# 兜底:如果无法计算相对路径,则使用基础文件名
|
| 492 |
+
object_key = os.path.basename(expanded_path)
|
| 493 |
+
|
| 494 |
if BOS_IMAGE_DIR:
|
| 495 |
object_key = "/".join(
|
| 496 |
+
part.strip("/ ") for part in (BOS_IMAGE_DIR, object_key) if part
|
| 497 |
)
|
|
|
|
|
|
|
| 498 |
|
| 499 |
mtime_ns = getattr(file_stat, "st_mtime_ns", int(file_stat.st_mtime * 1_000_000_000))
|
| 500 |
cache_signature = (mtime_ns, file_stat.st_size)
|
|
|
|
| 610 |
return f"data:image/webp;base64,{img_base64}"
|
| 611 |
|
| 612 |
|
| 613 |
+
def get_date_subfolder(base_dir: str) -> str:
|
| 614 |
+
"""
|
| 615 |
+
返回 picture/YYYYMMDD 格式的子目录名,并确保该目录在 base_dir 下存在。
|
| 616 |
+
:param base_dir: 基础目录路径
|
| 617 |
+
:return: 子目录名称 (如 "picture/20240305")
|
| 618 |
+
"""
|
| 619 |
+
date_str = datetime.now().strftime("%Y%m%d")
|
| 620 |
+
# 增加一层 picture 目录
|
| 621 |
+
sub_folder = os.path.join("picture", date_str)
|
| 622 |
+
target_dir = os.path.join(base_dir, sub_folder)
|
| 623 |
+
os.makedirs(target_dir, exist_ok=True)
|
| 624 |
+
# 统一使用正斜杠返回,确保跨平台和 BOS 路径一致
|
| 625 |
+
return sub_folder.replace(os.sep, "/")
|
| 626 |
+
|
| 627 |
+
|
| 628 |
def save_base64_to_unique_file(
|
| 629 |
base64_string: str, output_dir: str = "output_images"
|
| 630 |
) -> str | None:
|