# anonseoul's picture
# Backup turn-taking-dataset from MIR NAS
# fb5d697 verified
import json
# annotations = {date: , version: , description:, videos: [{video_uid: , split: , clips:[],[]... }],[{}]...}
#clips' key : (['clip_uid', 'source_clip_uid', 'video_uid', 'video_start_sec', 'video_end_sec', 'video_start_frame', 'video_end_frame', 'clip_start_sec', 'clip_end_sec', 'clip_start_frame', 'clip_end_frame', 'valid', 'camera_wearer', 'persons', 'missing_voice_segments', 'transcriptions', 'social_segments_talking', 'social_segments_looking'])
#ex. video_start_sec : 80.99 , clip_start_sec : 0 , video_start_frame : 2430 , clip_start_frame : 0
#clips: [{.. ,'transcriptions': ['transcription', 'start_time_sec', 'end_time_sec', 'person_id', 'video_start_time', 'video_start_frame', 'video_end_time', 'video_end_frame'] ,[], ..]
#number of av train annotations' videos : 153
#Multiple clips exist within a single video
#Which uid should be the key? What is the difference between clip_uid and source_clip_uid?
#av_train_uids are clip_uids
#But to account for moments when nobody is speaking, should time ranges with no transcript be labeled 1???
#THUMOS : {video_validation_xx: {anno: , feature_length: } , video_validation_xxx : ,..}
#anno shape : (feature_length, 22)
# --- Inspect the Ego4D AV-train annotation file ---
# Loads the JSON annotations and prints the starting video frame of the
# first clip of the last video, to sanity-check the file structure.
av_train_annotation_path = '/scratch/junhyeok/ego4d_dataset/v2/annotations/av_train.json'
output_path = 'av_train_uids.json'  # TODO(review): assigned but never used — confirm intended export step

# `with` guarantees the file handle is closed even if json.load raises.
with open(av_train_annotation_path, "r") as f:
    av_train_annotations = json.load(f)

# Use [-1] instead of the hard-coded index 152 (the last of the 153 videos
# noted above) so the script does not IndexError on a release with a
# different video count; for the current release the result is identical.
print(av_train_annotations['videos'][-1]['clips'][0]['video_start_frame'])
# bb7f1e75-65ee-4d98-adc8-c70551a35940
# print(av_train_annotations['videos'][0]['clips'][0]['clip_uid'])
# print('whole video start frame: ',av_train_annotations['videos'][0]['clips'][0]['video_start_frame'])
# print('whole video start frame: ',av_train_annotations['videos'][0]['clips'][0]['video_start_frame'])
# print('whole clip start frame: ',av_train_annotations['videos'][0]['clips'][0]['clip_start_frame'])
# print('whole clip end frame: ',av_train_annotations['videos'][0]['clips'][0]['clip_end_frame'])
# print('part video start frame:', av_train_annotations['videos'][0]['clips'][0]['transcriptions'][0]['video_start_frame'])
# print('part video end frame:', av_train_annotations['videos'][0]['clips'][0]['transcriptions'][0]['start_time_sec'])
# print('number of clips:', len(av_train_annotations['videos'][0]['clips']))
# num = len(av_train_annotations['videos'][0]['clips'])
# print('end of clip:', av_train_annotations['videos'][0]['clips'][num-1]['clip_end_frame'])
# trans_num = len(av_train_annotations['videos'][0]['clips'][num-1]['transcriptions'])
# print('end of video:', av_train_annotations['videos'][0]['clips'][num-1]['transcriptions'][trans_num-1]['video_end_frame'])