# Source path: ASLLRP_utterances_results / SignX / doc / generate_gloss_with_frames.py
# Author: FangSen9000
# Commit: "Track SMKD .h5 artifacts via LFS" (b4b4729)
#!/usr/bin/env python3
"""
为ASLLRP_utterances_mapping.txt中的每个gloss词添加起始和结束帧号
输出格式:
{
"utterance_id": {
"glosses": [
{
"gloss": "THAT",
"start_30fps": 9, # 相对于utterance起始的帧号(30 FPS)
"end_30fps": 13,
"start_24fps": 7, # 转换为24 FPS后的帧号
"end_24fps": 10,
"duration_30fps": 4, # 持续帧数(30 FPS)
"duration_24fps": 3, # 持续帧数(24 FPS)
"sign_type": "Lexical Signs"
},
...
],
"total_frames_30fps": 280,
"total_frames_24fps": 224,
"duration_seconds": 9.33
}
}
"""
import csv
import json
from collections import defaultdict, OrderedDict
from pathlib import Path
# Directory & file-path configuration.
_SCRIPT_PATH = Path(__file__).resolve()
_CWD = Path.cwd()
# Prefer the current working directory when it already contains doc/<this script>;
# otherwise assume the script lives in <SignX>/doc and take its grandparent.
SIGNX_DIR = _CWD if (_CWD / "doc" / _SCRIPT_PATH.name).exists() else _SCRIPT_PATH.parents[1]
DATA_ROOT = SIGNX_DIR.parent
MAPPING_FILE = DATA_ROOT / "ASLLRP_utterances_mapping.txt"
CSV_FILE = DATA_ROOT / "asllrp_sentence_signs_2025_06_28.csv"
OUTPUT_JSON = DATA_ROOT / "ASLLRP_utterances_with_frames.json"
OUTPUT_TXT = DATA_ROOT / "ASLLRP_utterances_with_frames.txt"
# Directories tried, in order, when the preferred output location is unwritable.
FALLBACK_DIRS = [
    SIGNX_DIR,
    Path("/tmp/signx_outputs"),
]
def open_with_fallback(preferred_path, mode):
    """Open ``preferred_path``, falling back to FALLBACK_DIRS if it fails.

    Tries the preferred path first, then the same filename under each entry
    of FALLBACK_DIRS, creating parent directories as needed.

    Returns:
        (file_handle, actual_path) for the first location that opened.

    Raises:
        OSError: the last error, if every candidate location fails.
    """
    last_path = None
    last_error = None
    candidates = [preferred_path] + [base / preferred_path.name for base in FALLBACK_DIRS]
    for candidate in candidates:
        try:
            candidate.parent.mkdir(parents=True, exist_ok=True)
            handle = candidate.open(mode, encoding='utf-8')
            if last_path is not None:
                print(f" * 无法写入 {last_path} ({last_error}), 改写到 {candidate}")
            return handle, candidate
        except OSError as e:
            # Catch the whole OSError family, not just PermissionError:
            # e.g. a read-only filesystem raises a plain OSError (EROFS)
            # which previously escaped the fallback chain.
            last_path = candidate
            last_error = e
            continue
    # All candidates failed: re-raise the most recent error.
    raise last_error
def normalize_gloss(token):
    """Normalize a gloss string for case-insensitive comparison.

    Strips surrounding whitespace, removes one pair of enclosing double
    quotes (when the string is longer than a single character), drops
    zero-width spaces, and lowercases the result. ``None`` maps to ``""``.
    """
    if token is None:
        return ""
    text = token.strip()
    is_quoted = text.startswith('"') and text.endswith('"') and len(text) > 1
    if is_quoted:
        text = text[1:-1]
    return text.strip().replace("\u200b", "").lower()
def load_mapping():
    """Parse MAPPING_FILE into ``{utterance_id: [gloss, ...]}``.

    Blank lines and lines without the ``": "`` separator are ignored.
    """
    mapping = {}
    with MAPPING_FILE.open('r', encoding='utf-8') as fh:
        for raw_line in fh:
            entry = raw_line.strip()
            if not entry or ': ' not in entry:
                continue  # skip blanks and malformed rows
            utterance_id, _, gloss_text = entry.partition(': ')
            mapping[utterance_id] = gloss_text.split()
    return mapping
def load_csv_signs():
    """Load per-sign frame details from CSV_FILE, grouped by utterance video.

    Returns:
        defaultdict mapping utterance video basename (``.mp4`` removed) to a
        list of sign-info dicts with 30 FPS and 24 FPS frame ranges.

    Rows with missing columns or non-integer frame values are skipped and
    counted; a warning is printed if any were skipped.
    """
    signs_by_utterance = defaultdict(list)
    skipped_rows = 0
    with CSV_FILE.open('r', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row in reader:
            try:
                utterance_video = row['Utterance video filename'].replace('.mp4', '')
                # Frame numbers in the CSV are assumed to be 30 FPS.
                utterance_start = int(row['Start frame of the containing utterance'])
                utterance_end = int(row['End frame of the containing utterance'])
                sign_start = int(row['Start frame of the sign video'])
                sign_end = int(row['End frame of the sign video'])
                # Read the text columns inside the try as well, so a missing
                # column is counted as a skipped row instead of crashing later
                # (previously these were accessed outside the handler).
                gloss_label = row['Main entry gloss label']
                sign_type = row['Sign type']
            except (ValueError, KeyError):
                # Skip malformed rows.
                skipped_rows += 1
                continue
            # Frame numbers relative to the utterance start (30 FPS).
            relative_start_30fps = sign_start - utterance_start
            relative_end_30fps = sign_end - utterance_start
            # Convert to 24 FPS (truncating).
            relative_start_24fps = int(relative_start_30fps * 24 / 30)
            relative_end_24fps = int(relative_end_30fps * 24 / 30)
            sign_info = {
                'gloss': gloss_label,
                'start_30fps': relative_start_30fps,
                'end_30fps': relative_end_30fps,
                'start_24fps': relative_start_24fps,
                'end_24fps': relative_end_24fps,
                'duration_30fps': relative_end_30fps - relative_start_30fps,
                'duration_24fps': relative_end_24fps - relative_start_24fps,
                'sign_type': sign_type,
                'utterance_start_frame': utterance_start,
                'utterance_end_frame': utterance_end,
            }
            signs_by_utterance[utterance_video].append(sign_info)
    if skipped_rows > 0:
        print(f" 警告: 跳过了 {skipped_rows} 个格式错误的CSV行")
    return signs_by_utterance
def fps_convert_30_to_24(frame_30fps):
    """Convert a 30 FPS frame number to its 24 FPS equivalent (truncated)."""
    scaled = frame_30fps * 24 / 30
    return int(scaled)
def align_gloss_sequence(mapping_glosses, csv_signs):
    """
    Greedily align mapping glosses with CSV sign entries in order.

    Walks the CSV list exactly once; for each mapping gloss, CSV entries are
    consumed until a normalized match is found. Entries consumed without
    matching become extras, glosses with no remaining match are missing, and
    leftover CSV entries at the end are also extras.

    Returns:
        ([(mapping_gloss, csv_sign_info), ...], missing_glosses, extra_csv_signs)
    """
    matched = []
    missing = []
    extras = []
    cursor = 0
    limit = len(csv_signs)
    for gloss in mapping_glosses:
        target = normalize_gloss(gloss)
        hit = None
        while cursor < limit:
            entry = csv_signs[cursor]
            cursor += 1
            if normalize_gloss(entry['gloss']) == target:
                hit = entry
                break
            extras.append(entry)
        if hit is None:
            missing.append(gloss)
        else:
            matched.append((gloss, hit))
    # Whatever the scan never reached is also surplus (empty slice is a no-op).
    extras.extend(csv_signs[cursor:])
    return matched, missing, extras
def generate_gloss_with_frames():
    """Generate per-utterance gloss data annotated with frame numbers.

    Loads the gloss mapping and the CSV sign table, aligns each utterance's
    gloss sequence against its CSV entries, writes a JSON file and a
    human-readable text file (falling back to alternate directories when the
    preferred location is unwritable), and prints summary statistics plus a
    few example entries.

    Returns:
        (result, alignment_warnings, missing_utterances,
         json_output_path, txt_output_path)
    """
    print("加载数据...")
    mapping = load_mapping()
    csv_signs = load_csv_signs()
    result = OrderedDict()
    missing_utterances = []   # utterance ids in the mapping but absent from the CSV
    alignment_warnings = []   # utterances whose gloss sequences did not align cleanly
    print(f"处理 {len(mapping)} 个utterances...")
    for utterance_id, gloss_sequence in mapping.items():
        if utterance_id not in csv_signs:
            missing_utterances.append(utterance_id)
            continue
        signs = csv_signs[utterance_id]
        # Total frame count, taken from the first sign's utterance bounds (30 FPS).
        if signs:
            total_frames_30fps = signs[0]['utterance_end_frame'] - signs[0]['utterance_start_frame']
            total_frames_24fps = fps_convert_30_to_24(total_frames_30fps)
            duration_seconds = total_frames_30fps / 30.0
        else:
            total_frames_30fps = 0
            total_frames_24fps = 0
            duration_seconds = 0
        matched_pairs, missing_glosses, extra_signs = align_gloss_sequence(gloss_sequence, signs)
        glosses_with_frames = []
        for gloss_token, sign in matched_pairs:
            glosses_with_frames.append({
                'gloss': gloss_token,  # keep the original spelling from the mapping file
                'source_gloss': sign['gloss'],
                'start_30fps': sign['start_30fps'],
                'end_30fps': sign['end_30fps'],
                'start_24fps': sign['start_24fps'],
                'end_24fps': sign['end_24fps'],
                'duration_30fps': sign['duration_30fps'],
                'duration_24fps': sign['duration_24fps'],
                'sign_type': sign['sign_type']
            })
        if missing_glosses or extra_signs:
            alignment_warnings.append({
                'utterance': utterance_id,
                'missing': missing_glosses,
                'extra_count': len(extra_signs)
            })
        result[utterance_id] = {
            'glosses': glosses_with_frames,
            'total_frames_30fps': total_frames_30fps,
            'total_frames_24fps': total_frames_24fps,
            'duration_seconds': round(duration_seconds, 2),
            'gloss_count': len(glosses_with_frames)
        }
    # Save the JSON output (open_with_fallback may redirect to another dir).
    print(f"\n保存JSON格式到 {OUTPUT_JSON}...")
    json_file, json_output_path = open_with_fallback(OUTPUT_JSON, 'w')
    with json_file as jf:
        json.dump(result, jf, indent=2, ensure_ascii=False)
    # Save the human-readable text output.
    print(f"保存文本格式到 {OUTPUT_TXT}...")
    txt_file, txt_output_path = open_with_fallback(OUTPUT_TXT, 'w')
    with txt_file as tf:
        for utterance_id, data in result.items():
            gloss_strings = []
            for g in data['glosses']:
                gloss_strings.append(
                    f"{g['gloss']}[{g['start_30fps']}:{g['end_30fps']}->"
                    f"{g['start_24fps']}:{g['end_24fps']}]"
                )
            tf.write(f"{utterance_id}: {' '.join(gloss_strings)}\n")
    # Print summary statistics.
    print("\n" + "="*80)
    print("处理完成!")
    print("="*80)
    print(f"总utterances: {len(mapping)}")
    print(f"成功处理: {len(result)}")
    print(f"缺失CSV数据: {len(missing_utterances)}")
    if missing_utterances:
        print(f"\n缺失的utterances (前10个): {missing_utterances[:10]}")
    if alignment_warnings:
        print(f"\n存在对齐差异的utterances: {len(alignment_warnings)}")
        for warn in alignment_warnings[:10]:
            print(f" - {warn['utterance']}: "
                  f"missing={warn['missing'][:3]} extra_signs={warn['extra_count']}")
    # Show a few example entries.
    print("\n示例数据 (前3个):")
    print("-"*80)
    for i, (utterance_id, data) in enumerate(list(result.items())[:3], 1):
        print(f"\n{i}. Utterance {utterance_id}:")
        print(f" 总帧数: {data['total_frames_30fps']} (30fps) / {data['total_frames_24fps']} (24fps)")
        print(f" 时长: {data['duration_seconds']} 秒")
        print(f" Gloss数量: {data['gloss_count']}")
        print(f" 前5个glosses:")
        for g in data['glosses'][:5]:
            print(f" - {g['gloss']}: "
                  f"30fps[{g['start_30fps']}:{g['end_30fps']}] "
                  f"-> 24fps[{g['start_24fps']}:{g['end_24fps']}] "
                  f"({g['duration_24fps']}帧)")
    return result, alignment_warnings, missing_utterances, json_output_path, txt_output_path
def create_compact_format(result):
    """Write a compact one-line-per-utterance file: ``GLOSS|start-end`` at 24 FPS.

    Mirrors the layout of the original mapping.txt. Returns the path the file
    was actually written to (fallback directories may be used).
    """
    OUTPUT_COMPACT = "ASLLRP_utterances_compact_frames.txt"
    print(f"\n创建紧凑格式 {OUTPUT_COMPACT}...")
    handle, compact_path = open_with_fallback(DATA_ROOT / OUTPUT_COMPACT, 'w')
    with handle as out:
        for utterance_id, data in result.items():
            tokens = [
                f"{g['gloss']}|{g['start_24fps']}-{g['end_24fps']}"
                for g in data['glosses']
            ]
            out.write(f"{utterance_id}: {' '.join(tokens)}\n")
    print(f"紧凑格式已保存到 {compact_path}")
    return compact_path
if __name__ == "__main__":
print("ASLLRP Gloss帧号生成工具")
print("="*80)
result, alignment_warnings, missing_utterances, json_path, txt_path = generate_gloss_with_frames()
compact_path = create_compact_format(result)
print("\n" + "="*80)
print("生成的文件:")
print(f" 1. {json_path} - 完整JSON格式(包含30fps和24fps)")
print(f" 2. {txt_path} - 可读文本格式")
print(f" 3. {compact_path} - 紧凑格式(24fps)")
print("="*80)