# turn-taking-dataset / anno_preprocess / EASYCOM / _preprocess_annotation_v2.py
# Origin: anonseoul — "Backup turn-taking-dataset from MIR NAS" (commit fb5d697, verified)
import argparse
import json
import os
import re
from glob import glob
import numpy as np
# in this script, make a label within 5 classes:
# [background, target speaker speaking, target speaker backchannel,
#  other people speaking, other people backchannel]
# the principle is: (please refer _measure_backchannels.py)
# 1. the 30 most frequent phrases are considered as backchannel
# 2. from that, classify class and assign to label
transcript_path = '../../mmaction2/data/EasyComDataset_fix/Main/Speech_Transcriptions/'
metadata_path = '../../notebooks/results/easycom_metadata.json'
annotation_path = '/home/junhyeok/projects/turn-taking/datasets/EasyCom/target_perframe/case1_remove_bc'
backchannel_phrases_path = 'backchannel_30_frequent_word.json' # Path to your backchannel phrases JSON

# Configuration presets: SEPARATE_BACKCHANNEL / INCLUDE_BACKCHANNEL / CLASS_NUM / CLOSED_INTERVAL
# case1:      False / False / 3 / False
# case1_nobc: True  / False / 3 / False
SEPARATE_BACKCHANNEL = True
INCLUDE_BACKCHANNEL = False
CLASS_NUM = 3
CLOSED_INTERVAL = False

# Dump the effective configuration so runs are self-describing in the log.
print(f"Annotation path: {annotation_path}")
print(f"Transcript path: {transcript_path}")
print(f"Metadata path: {metadata_path}")
print(f"Backchannel phrases path: {backchannel_phrases_path}")
print(f"Separate backchannel: {SEPARATE_BACKCHANNEL}")  # fix: this flag was missing from the config dump
print(f"Include backchannel: {INCLUDE_BACKCHANNEL}")
print(f"Class num: {CLASS_NUM}")
print(f"Closed interval: {CLOSED_INTERVAL}")
def load_backchannel_phrases(path=None):
    """Load the backchannel phrase list from a JSON file.

    Args:
        path: Optional path to the phrases JSON. Defaults to the
            module-level ``backchannel_phrases_path``, so existing
            no-argument callers are unchanged.

    Returns:
        The deserialized JSON content (expected: a collection of
        cleaned backchannel phrase strings used for membership tests).
    """
    if path is None:
        path = backchannel_phrases_path
    with open(path, 'r') as f:
        return json.load(f)
def main(args: argparse.Namespace) -> None:
    """Build per-frame class labels for every EasyCom session and save them as .npy.

    For each session listed in the metadata JSON, reads the per-segment speech
    transcripts, maps each utterance to a class index depending on the
    SEPARATE_BACKCHANNEL / INCLUDE_BACKCHANNEL flags, and writes a
    (session_total_frames, CLASS_NUM) one-hot array to ``annotation_path``.

    Args:
        args: Parsed CLI namespace; uses ``video_fps`` (original video rate)
            and ``anno_fps`` (target annotation rate).
    """
    backchannel_phrases = load_backchannel_phrases()
    with open(metadata_path, 'r') as f:
        metadata = json.load(f)
    video_fps = args.video_fps
    anno_fps = args.anno_fps
    # Number of video frames collapsed into one annotation frame
    # (e.g. 20 fps video / 5 fps annotation -> chunk_size 4).
    chunk_size = int(video_fps / anno_fps)
    annotation = {}
    for session in metadata.keys():
        session_metadata = metadata[session]
        # Total annotation frames after downsampling by chunk_size.
        session_total_frames = int(session_metadata['total_frames']) // chunk_size
        anno = np.zeros((session_total_frames, CLASS_NUM))
        annotation[session] = {}
        # Running offset (in original video frames) of the current segment
        # within the concatenated session timeline.
        frame_idx = 0
        for seg in session_metadata['segments']:
            # Transcript files live one directory level below transcript_path,
            # keyed by session and the segment's video name.
            transcript_pattern = os.path.join(transcript_path, '*', session, f"{seg['video_name']}.json")
            transcript_file = glob(transcript_pattern)
            if len(transcript_file) != 1:
                # Best-effort: skip segments with ambiguous/missing transcripts.
                # NOTE(review): frame_idx is still advanced below, so the
                # timeline stays aligned even when a segment is skipped.
                print("Error: multiple or no transcript files found.")
                continue
            with open(transcript_file[0], 'r') as tf:
                transcript_data = json.load(tf)
            for utt in transcript_data:
                # Utterance frames are 1-based within the segment; shift by the
                # session offset and downsample to annotation resolution.
                start = (utt['Start_Frame'] - 1 + frame_idx) // chunk_size
                end = (utt['End_Frame'] - 1 + frame_idx) // chunk_size
                start = int(start)
                end = int(end)
                if start < 0:
                    raise ValueError(f"Start frame is negative: {start}")
                # Normalize the transcript for backchannel matching: lowercase,
                # keep only word chars, whitespace, and square brackets.
                cleaned_transcript = re.sub(r'[^\w\s\[\]]', '', utt['Transcription'].lower()).strip()
                if SEPARATE_BACKCHANNEL == True:
                    if INCLUDE_BACKCHANNEL == True:
                        # NOTE(review): classes 3/4 here require CLASS_NUM >= 5;
                        # with the current CLASS_NUM = 3 this branch would index
                        # out of bounds — confirm config combos before enabling.
                        if cleaned_transcript in backchannel_phrases:
                            # backchannel, if speaker is our target, assign 2, otherwise 4
                            class_type = 2 if utt['Participant_ID'] == 2 else 4
                        else:
                            class_type = 1 if utt['Participant_ID'] == 2 else 3
                    else:
                        # separate backchannel, but not include as different label or speaking
                        if cleaned_transcript in backchannel_phrases:
                            # Backchannels are dropped entirely (left as background).
                            continue
                        else:
                            # if the transcript is not backchannel, assign 1 or 2
                            # (Participant_ID 2 is the target speaker per the
                            # comment convention above.)
                            class_type = 1 if utt['Participant_ID'] == 2 else 2
                else:
                    class_type = 1 if utt['Participant_ID'] == 2 else 2
                # Mark the utterance span; closed interval includes the end frame.
                if CLOSED_INTERVAL:
                    anno[start:end+1, class_type] = 1
                else:
                    anno[start:end, class_type] = 1
            frame_idx += seg['num_frames']
        # if there is no annotation, assign background class
        anno[np.where(np.sum(anno, axis=1) == 0), 0] = 1
        annotation[session]['anno'] = anno
        annotation[session]['feature_length'] = session_total_frames
        np.save(os.path.join(annotation_path, f"{session}.npy"), anno)
if __name__ == '__main__':
    # CLI entry point: the two rates determine the video->annotation
    # downsampling ratio used by main().
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--video-fps', type=int, default=20, help='original fps of video')
    arg_parser.add_argument('--anno-fps', type=int, default=5, help='fps of annotation')
    main(arg_parser.parse_args())