# interleaved-umm / scripts/analyze_errors.py
# Uploaded by Caesarrr via the upload-large-folder tool (commit 18309e8, verified).
import re
import os
from collections import Counter
# ================= Configuration =================
# 1. Path to the error log file to analyze.
LOG_FILE = 'copy_errors.log'
# 2. Pattern locating the category and sequence parts of an image path.
# Assumed layout: data/raw_images/{category}/{sequence}/images/filename.jpg
# Extracted with a regex. Explanation:
#   data/raw_images/  -> literal prefix (adjust to your actual layout,
#                        or use something more generic)
#   ([^/]+)           -> capture group 1: category (any chars except '/')
#   /                 -> separator
#   ([^/]+)           -> capture group 2: sequence
#   /images/          -> literal 'images' directory that must follow
PATH_PATTERN = re.compile(r'data/raw_images/([^/]+)/([^/]+)/images/')
# ===========================================
def analyze_log(log_file, path_pattern=None, output_file="missing_stats.txt"):
    """Analyze a copy-error log and summarize missing images.

    Parses lines of the form ``[FAIL] 图片路径: <path>``, extracts the
    category and sequence components of each path with *path_pattern*,
    prints a summary report to stdout, and writes detailed statistics
    to *output_file*.

    Args:
        log_file: Path to the error log produced by the copy script.
        path_pattern: Compiled regex with two capture groups
            (category, sequence). Defaults to the module-level
            ``PATH_PATTERN``.
        output_file: Destination path for the detailed statistics file.

    Returns:
        None. All results are printed and/or written to *output_file*.
    """
    if path_pattern is None:
        path_pattern = PATH_PATTERN

    if not os.path.exists(log_file):
        print(f"[Error] 找不到日志文件: {log_file}")
        return

    print(f"[*] 正在分析日志文件: {log_file} ...")

    # Missing-image counts per category and per sequence. The sets of
    # affected categories/sequences are simply these counters' key sets,
    # so no separate sets are kept.
    category_counter = Counter()
    sequence_counter = Counter()
    # Hierarchy of affected data: {category: {sequence, ...}}
    cat_seq_map = {}

    with open(log_file, 'r', encoding='utf-8') as f:
        for raw_line in f:
            line = raw_line.strip()
            # Only lines reporting a failed image path are of interest.
            # Log format: [FAIL] 图片路径: data/raw_images/bowl/...
            if not line.startswith("[FAIL] 图片路径:"):
                continue
            # Everything after the first ": " is the image path; guard
            # against a malformed line with no separator.
            parts = line.split(": ", 1)
            if len(parts) < 2:
                continue
            match = path_pattern.search(parts[1])
            if not match:
                # Path layout does not match the expected structure;
                # skip it (uncomment to debug unexpected formats).
                # print(f"[Warning] 无法解析路径结构: {parts[1]}")
                continue
            category = match.group(1)
            sequence = match.group(2)
            category_counter[category] += 1
            sequence_counter[sequence] += 1
            cat_seq_map.setdefault(category, set()).add(sequence)

    # ================= Report to stdout =================
    print("\n" + "=" * 50)
    print("分析报告 (Analysis Report)")
    print("=" * 50)

    print(f"\n1. 缺失图片涉及的 Category 总数: {len(category_counter)}")
    print("-" * 30)
    # Sorted by number of missing images, descending.
    for cat, count in category_counter.most_common():
        print(f" - {cat}: 缺失 {count} 张图")

    print(f"\n2. 缺失图片涉及的 Sequence 总数: {len(sequence_counter)}")
    print("-" * 30)
    # If there are many sequences, show only the worst top_n.
    top_n = 20
    for seq, count in sequence_counter.most_common(top_n):
        print(f" - {seq}: 缺失 {count} 张图")
    if len(sequence_counter) > top_n:
        print(f" ... (还有 {len(sequence_counter) - top_n} 个 sequence)")

    print("\n3. 详细层级结构 (Category -> Sequence)")
    print("-" * 30)
    for cat in sorted(cat_seq_map):
        # Sort sequences for stable, readable output.
        sorted_seqs = sorted(cat_seq_map[cat])
        print(f"[{cat}] 下有 {len(sorted_seqs)} 个有问题的 sequence:")
        print(f" {', '.join(sorted_seqs)}")
        print("")

    # ================= Write detailed stats file =================
    with open(output_file, 'w', encoding='utf-8') as f_out:
        f_out.write("=== 缺失数据统计 ===\n")
        f_out.write(f"Category 总数: {len(category_counter)}\n")
        f_out.write(f"Sequence 总数: {len(sequence_counter)}\n\n")
        f_out.write("=== Category 列表 (格式: 名称 [缺失数量]) ===\n")
        for cat, count in category_counter.most_common():
            f_out.write(f"{cat} [{count}]\n")
        f_out.write("\n=== Sequence 列表 (按 Category 分组) ===\n")
        for cat in sorted(cat_seq_map):
            f_out.write(f"\n[{cat}]\n")
            for seq in sorted(cat_seq_map[cat]):
                f_out.write(f" - {seq} (缺失 {sequence_counter[seq]} 张)\n")

    print(f"\n[*] 详细统计已保存至: {output_file}")
# Script entry point: analyze the configured log file when run directly.
if __name__ == "__main__":
    analyze_log(LOG_FILE)