# Source: ASLLRP_utterances_results/SignX/doc/quick_coverage_eval.py
# (GitHub page residue, commented out to keep this file valid Python:
#  author FangSen9000, commit b4b4729, "Track SMKD .h5 artifacts via LFS")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Quick coverage evaluation using pre-generated attention analysis outputs.
This script walks through a detailed attention directory (e.g.
checkpoints_asllrp/detailed_eval-init.trans_*.*/sample_XXX), loads the
frame_alignment.json produced for each sample, aligns it with the
corresponding reference gloss sequence, and approximates frame coverage
against ASLLRP ground-truth annotations.
Usage:
python quick_coverage_eval.py \
--detail-dir checkpoints_asllrp/detailed_eval-init.trans_20251228_041351 \
--ref-file preprocessed-asllrp/dev.bpe.gloss
"""
import argparse
import json
import re
from pathlib import Path
from collections import defaultdict
from difflib import SequenceMatcher
def clean_gloss_text(text: str) -> str:
    """Strip BPE continuation markers ('@@') and surrounding whitespace."""
    without_bpe = text.replace('@@ ', '').replace('@@', '')
    return without_bpe.strip()
def load_reference_sentences(ref_path: Path):
    """Return reference sentences ordered by their leading numeric index.

    Each useful line looks like ``<index> <gloss tokens...>``; blank lines
    and lines whose first token is not an integer are skipped.  BPE markers
    ('@@') are stripped from the gloss text.
    """
    indexed = []
    with ref_path.open('r', encoding='utf-8') as handle:
        for raw in handle:
            stripped = raw.strip()
            if not stripped:
                continue
            tokens = stripped.split()
            try:
                key = int(tokens[0])
            except ValueError:
                continue
            # Inline of clean_gloss_text: drop BPE markers, trim whitespace.
            sentence = ' '.join(tokens[1:]).replace('@@ ', '').replace('@@', '').strip()
            indexed.append((key, sentence))
    indexed.sort(key=lambda pair: pair[0])
    return [sentence for _, sentence in indexed]
def load_mapping(mapping_path: Path):
    """Build a mapping from gloss sentence to its ASLLRP utterance IDs.

    Input lines have the form ``<utterance_id>: <gloss sentence>``; lines
    without a colon are ignored.  Several utterance IDs may share one
    sentence, so values are lists in file order.
    """
    sentence_to_ids = defaultdict(list)
    with mapping_path.open('r', encoding='utf-8') as handle:
        for raw in handle:
            entry = raw.strip()
            if not entry or ':' not in entry:
                continue
            utt_id, _, gloss = entry.partition(':')
            # Inline of clean_gloss_text applied to the gloss part.
            key = gloss.strip().replace('@@ ', '').replace('@@', '').strip()
            sentence_to_ids[key].append(utt_id.strip())
    return sentence_to_ids
def pop_video_id(mapping, sentence):
    """Consume and return the next utterance ID queued for *sentence*.

    Returns None when the sentence is unknown or its ID list is exhausted.
    Mutates *mapping* by popping the front of the matching list.
    """
    candidates = mapping.get(sentence)
    return candidates.pop(0) if candidates else None
def normalized_iou(pred, gt):
    """IoU of two normalized [start, end] intervals.

    Degenerate cases (zero or negative union span) yield 0.0.
    """
    overlap = max(0.0, min(pred[1], gt[1]) - max(pred[0], gt[0]))
    span = max(pred[1], gt[1]) - min(pred[0], gt[0])
    return overlap / span if span > 0 else 0.0
def normalize_interval(start, end, total):
    """Scale frame positions into [0, 1] fractions of *total*.

    A non-positive *total* maps to the degenerate interval (0.0, 0.0).
    """
    if total > 0:
        return start / total, end / total
    return 0.0, 0.0
def normalize_token(token):
    """Canonicalize a gloss token for tolerant comparison.

    Lowercases, drops BPE markers, and keeps only [a-z0-9]; a None token
    becomes the empty string.
    """
    if token is None:
        return ""
    lowered = token.lower().replace("@@", "")
    return re.sub(r'[^a-z0-9]+', '', lowered)
def token_similarity(a, b):
    """Similarity of two normalized tokens in [0, 1].

    Identical tokens (including two empties) score 1.0; an empty token
    against a non-empty one scores 0.0; otherwise the SequenceMatcher
    ratio is used.
    """
    if a == b:
        return 1.0
    if not a or not b:
        return 0.0
    return SequenceMatcher(None, a, b).ratio()
def _find_match_index(word_norm, gt_entries, gt_used, start_idx, threshold=0.7):
    """Return the index of the first unused GT entry similar to *word_norm*.

    Searches forward from *start_idx* first (favoring a monotone alignment),
    then falls back to scanning the whole list; returns None when no unused
    entry reaches the similarity *threshold*.
    """
    for window in (range(start_idx, len(gt_entries)), range(len(gt_entries))):
        for gt_idx in window:
            if gt_used[gt_idx]:
                continue
            if token_similarity(word_norm, gt_entries[gt_idx]['norm']) >= threshold:
                return gt_idx
    return None


def compute_coverage(detail_dir: Path,
                     ref_sentences,
                     mapping_path: Path,
                     gt_json_path: Path,
                     output_path: Path,
                     expansion_factors,
                     overflow_penalty):
    """Approximate frame-level coverage of predicted gloss alignments.

    For every sample directory under *detail_dir* that contains a
    ``frame_alignment.json``, the predicted per-word frame ranges are
    fuzzily matched against the ASLLRP ground-truth gloss intervals, and
    IoU / complete-coverage / expanded-window statistics are aggregated.

    Args:
        detail_dir: Directory of per-sample attention outputs.
        ref_sentences: Reference gloss sentences, index-aligned with the
            sorted sample directories.
        mapping_path: File mapping utterance IDs to gloss sentences.
        gt_json_path: JSON with per-utterance ground-truth frame annotations.
        output_path: Where the summary JSON is written.
        expansion_factors: Multipliers applied to the predicted window
            length when measuring word-level coverage.
        overflow_penalty: Amount subtracted from coverage when the expanded
            window spills outside the ground-truth interval.

    Returns:
        The summary dict that was also written to *output_path*.

    Raises:
        ValueError: If there are more sample directories than reference
            sentences.
    """
    mapping = load_mapping(mapping_path)
    with gt_json_path.open('r', encoding='utf-8') as f:
        gt_data = json.load(f)
    samples = sorted([d for d in detail_dir.iterdir() if d.is_dir()])
    # Explicit raise instead of `assert`: asserts are stripped under -O.
    if len(samples) > len(ref_sentences):
        raise ValueError("Reference sentences shorter than number of samples")
    overall = {
        "matched_tokens": 0,
        "complete_coverage_hits": 0,
        "iou_sum": 0.0,
        "samples_with_matches": 0,
        "skipped_samples": 0,
    }
    per_sample = []
    for sample_idx, sample_dir in enumerate(samples):
        frame_file = sample_dir / "frame_alignment.json"
        if not frame_file.exists():
            overall["skipped_samples"] += 1
            continue
        with frame_file.open('r', encoding='utf-8') as f:
            frame_data = json.load(f)
        sentence = ref_sentences[sample_idx]
        video_id = pop_video_id(mapping, sentence)
        if not video_id or video_id not in gt_data:
            overall["skipped_samples"] += 1
            continue
        gt_glosses = gt_data[video_id]["glosses"]
        if not gt_glosses:
            overall["skipped_samples"] += 1
            continue
        # `default=0` avoids a ValueError when no gloss carries an
        # 'end_24fps' key (the keys can be absent, see the filter below);
        # such samples fall through to the skip branch.
        gt_total = max((g['end_24fps'] for g in gt_glosses
                        if 'end_24fps' in g), default=0)
        if gt_total <= 0:
            overall["skipped_samples"] += 1
            continue
        # Only glosses carrying both frame keys are usable as GT entries.
        gt_entries = [{
            'gloss': gt['gloss'],
            'norm': normalize_token(gt['gloss']),
            'start': gt['start_24fps'],
            'end': gt['end_24fps']
        } for gt in gt_glosses if 'start_24fps' in gt and 'end_24fps' in gt]
        gt_used = [False] * len(gt_entries)
        last_match_idx = 0
        matches = []
        # Guard against zero/missing frame counts before normalizing.
        total_frames_pred = max(frame_data.get("total_video_frames", 0), 1)
        for pred in frame_data.get("frame_ranges", []):
            word = pred['word']
            word_norm = normalize_token(word)
            match_idx = _find_match_index(
                word_norm, gt_entries, gt_used, last_match_idx)
            if match_idx is None:
                continue
            gt_used[match_idx] = True
            last_match_idx = max(last_match_idx, match_idx)
            gt_entry = gt_entries[match_idx]
            pred_norm = normalize_interval(
                pred['start_frame'], pred['end_frame'], total_frames_pred)
            gt_norm = normalize_interval(gt_entry['start'], gt_entry['end'], gt_total)
            iou = normalized_iou(pred_norm, gt_norm)
            # "Complete" means the prediction fully encloses the GT span.
            complete = pred_norm[0] <= gt_norm[0] and pred_norm[1] >= gt_norm[1]
            matches.append({
                "word": word,
                "pred_norm": pred_norm,
                "gt_norm": gt_norm,
                "iou": iou,
                "complete": complete,
                "pred_frames": (pred['start_frame'], pred['end_frame'], total_frames_pred),
                "gt_frames": (gt_entry['start'], gt_entry['end'], gt_total)
            })
        if not matches:
            overall["skipped_samples"] += 1
            continue
        sample_stats = {
            "sample": sample_dir.name,
            "video_id": video_id,
            "matched": len(matches),
            "complete_coverage": sum(1 for m in matches if m["complete"]),
            "avg_iou": sum(m["iou"] for m in matches) / len(matches),
        }
        per_sample.append(sample_stats)
        overall["matched_tokens"] += sample_stats["matched"]
        overall["complete_coverage_hits"] += sample_stats["complete_coverage"]
        overall["iou_sum"] += sample_stats["avg_iou"]
        overall["samples_with_matches"] += 1
        # --- word-level coverage under symmetric window expansion ---------
        for factor in expansion_factors:
            factor_stats = overall.setdefault("factor_stats", {}).setdefault(
                factor, {"coverage_sum": 0.0, "count": 0, "perfect_hits": 0, "penalized": 0}
            )
            for match in matches:
                pred_start, pred_end, pred_span_total = match["pred_frames"]
                gt_start, gt_end, gt_span_total = match["gt_frames"]
                if pred_span_total <= 0 or gt_span_total <= 0:
                    continue
                # Rescale the predicted window onto the GT frame axis.
                pred_start_abs = pred_start / pred_span_total * gt_span_total
                pred_end_abs = pred_end / pred_span_total * gt_span_total
                if pred_end_abs <= pred_start_abs:
                    # Degenerate window: give it an epsilon length.
                    pred_end_abs = pred_start_abs + 1e-6
                # Expand symmetrically around the window center, clamped to
                # the video extent.
                center = (pred_start_abs + pred_end_abs) / 2.0
                half_len = (pred_end_abs - pred_start_abs) / 2.0 * factor
                start_exp = max(0.0, center - half_len)
                end_exp = min(gt_span_total, center + half_len)
                overlap = max(0.0, min(end_exp, gt_end) - max(start_exp, gt_start))
                gt_len = max(gt_end - gt_start, 1e-6)
                coverage = overlap / gt_len
                penalized = False
                if start_exp < gt_start or end_exp > gt_end:
                    # Expanded window spills outside GT: apply the penalty.
                    coverage = max(0.0, coverage - overflow_penalty)
                    penalized = True
                factor_stats["coverage_sum"] += coverage
                factor_stats["count"] += 1
                if coverage >= 1.0:
                    factor_stats["perfect_hits"] += 1
                if penalized:
                    factor_stats["penalized"] += 1
    # Summarize the per-factor accumulators (keys stringified for JSON).
    factor_summary = {}
    factor_stats = overall.get("factor_stats", {})
    for factor, stats in factor_stats.items():
        if stats["count"] == 0:
            continue
        factor_summary[str(factor)] = {
            "avg_coverage": stats["coverage_sum"] / stats["count"],
            "perfect_rate": stats["perfect_hits"] / stats["count"],
            "penalized_rate": stats["penalized"] / stats["count"],
            "count": stats["count"],
        }
    overall_summary = {
        "samples_evaluated": len(samples),
        "samples_with_matches": overall["samples_with_matches"],
        "samples_skipped": overall["skipped_samples"],
        "avg_complete_coverage": (
            overall["complete_coverage_hits"] / overall["matched_tokens"]
            if overall["matched_tokens"] > 0 else 0.0
        ),
        "avg_iou": (
            overall["iou_sum"] / overall["samples_with_matches"]
            if overall["samples_with_matches"] > 0 else 0.0
        ),
        "word_level": factor_summary
    }
    output = {
        "detail_dir": str(detail_dir),
        "overall": overall_summary,
        "sample_stats": per_sample,
    }
    with output_path.open('w', encoding='utf-8') as f:
        json.dump(output, f, indent=2)
    return output
def main():
    """CLI entry point: parse arguments, run the evaluation, print the summary."""
    parser = argparse.ArgumentParser(description="Quick coverage evaluator")
    parser.add_argument("--detail-dir", type=Path, required=True,
                        help="Path to detailed attention directory")
    parser.add_argument("--ref-file", type=Path,
                        default=Path("preprocessed-asllrp/dev.bpe.gloss"),
                        help="Reference gloss file with indices")
    parser.add_argument("--mapping-file", type=Path,
                        default=Path("../ASLLRP_utterances_mapping.txt"),
                        help="Utterance mapping file (video_id: gloss ...)")
    parser.add_argument("--gt-json", type=Path,
                        default=Path("../ASLLRP_utterances_with_frames.json"),
                        help="JSON with ground-truth frame annotations")
    parser.add_argument("--output", type=Path,
                        default=Path("coverage_summary.json"),
                        help="Output summary JSON path")
    parser.add_argument("--expansion-factors", type=str, default="1.0,1.5,2.0",
                        help="Comma-separated list of expansion multipliers to test")
    parser.add_argument("--overflow-penalty", type=float, default=0.5,
                        help="Penalty to subtract if expanded window exceeds GT range")
    args = parser.parse_args()

    # Parse the comma-separated multipliers, ignoring empty pieces.
    factors = []
    for piece in args.expansion_factors.split(','):
        if piece.strip():
            factors.append(float(piece))

    references = load_reference_sentences(args.ref_file)
    summary = compute_coverage(
        detail_dir=args.detail_dir,
        ref_sentences=references,
        mapping_path=args.mapping_file,
        gt_json_path=args.gt_json,
        output_path=args.output,
        expansion_factors=factors,
        overflow_penalty=args.overflow_penalty
    )
    print(json.dumps(summary["overall"], indent=2))
    print(f"\nPer-sample stats saved to {args.output}")


if __name__ == "__main__":
    main()