# Provenance note (originally scraped page residue, not code):
# Backup of turn-taking-dataset from MIR NAS — uploaded by anonseoul,
# commit fb5d697 (verified).
import json
import os
from glob import glob
import numpy as np
# 24.05.06) made new .py file from .ipynb
# from _easycom_anno

# Input/output locations for building per-session unified transcripts.
# NOTE(review): relative paths assume the script is run from its own directory.
transcript_path = '../../mmaction2/data/EasyComDataset_fix/Main/Speech_Transcriptions/'
unified_transcript_path = 'unified_transcripts'
metadata_path = '../../notebooks/results/easycom_metadata.json'
annotation_path = '/home/junhyeok/projects/turn-taking/datasets/EasyCom/target_perframe/case1'

# Session metadata: presumably maps session name -> {'total_frames', 'segments', ...}
# (keys 'total_frames', 'segments', 'video_name', 'num_frames' are read below).
with open(metadata_path, 'r') as f:
    metadata = json.load(f)
print(metadata)
print(metadata.keys())

# Annotations are produced at a lower rate than the video; chunk_size is the
# number of video frames per annotation frame and must be integral.
video_fps = 20
anno_fps = 5
# BUGFIX: the original tested integrality via float division and then called
# the site-provided exit() builtin, which exits with status 0 on this error
# path. divmod keeps everything in int arithmetic and SystemExit(1) reports
# failure properly.
chunk_size, _rem = divmod(video_fps, anno_fps)
if _rem:
    print("Chunk size is not an integer")
    raise SystemExit(1)
# Accumulators. NOTE(review): unified_transcripts, anno and annotation are
# initialized but not consumed in this part of the script — presumably they
# are filled/used further below; do not remove.
unified_transcripts = {}
annotation = {}

# Make sure the output directory exists before the per-session dumps below.
os.makedirs(unified_transcript_path, exist_ok=True)

for session in metadata.keys():
    session_metadata = metadata[session]
    print(session)
    # Total frames at annotation rate (video frames grouped into chunks).
    session_total_frames = int(session_metadata['total_frames']) // chunk_size
    # Per-annotation-frame target array with 3 columns (column semantics not
    # shown here — TODO confirm against the code that fills it).
    anno = np.zeros((session_total_frames, 3))
    annotation[session] = {}
    # Concatenated, re-indexed transcript entries for this session.
    session_transcripts = []
    frame_idx = 0  # running session-global offset of the current segment's first frame
    for seg in session_metadata['segments']:
        video_name = seg['video_name']
        print(seg['video_name'])
        transcript_pattern = os.path.join(transcript_path, '*', session, f"{video_name}.json")
        transcript_file = glob(transcript_pattern)
        # BUGFIX: the original only guarded against >1 matches; zero matches
        # crashed with IndexError on the [0] below. Fail loudly in both cases.
        if len(transcript_file) != 1:
            print("something is wrong")
            raise SystemExit(
                f"expected exactly one transcript matching {transcript_pattern}, "
                f"found {len(transcript_file)}"
            )
        transcript_file = transcript_file[0]
        # EasyCom frames start with 1. (of course, there is no frame 0 in video)
        # 11/22) then, shouldn't we start with 0 in annotation, so we need to subtract 1 from the start frame
        # 11/22 we didn't do this
        # Shift each utterance's frame range from segment-local to
        # session-global indices.
        with open(transcript_file, 'r') as tf:
            transcript_data = json.load(tf)
            for utt in transcript_data:
                utt['Start_Frame'] += frame_idx
                utt['End_Frame'] += frame_idx
        session_transcripts.extend(transcript_data)
        print(transcript_data[0])
        frame_idx += seg['num_frames']
    # Save the unified transcript as a JSON file
    with open(os.path.join(unified_transcript_path, f"{session}.json"), 'w') as json_file:
        json.dump(session_transcripts, json_file, indent=4)