File size: 2,484 Bytes
fb5d697
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
import json
import os
from glob import glob

import numpy as np

# 24.05.06) made new .py file from .ipynb
# from _easycom_anno

# Input/output locations for the EasyCom transcript unification step.
transcript_path = '../../mmaction2/data/EasyComDataset_fix/Main/Speech_Transcriptions/'
unified_transcript_path = 'unified_transcripts'
metadata_path = '../../notebooks/results/easycom_metadata.json'

annotation_path = '/home/junhyeok/projects/turn-taking/datasets/EasyCom/target_perframe/case1'

# Session metadata: used below as session name -> {'total_frames', 'segments'}
# (schema inferred from usage in this file -- TODO confirm against the
# notebook that produced easycom_metadata.json).
with open(metadata_path, 'r') as f:
  metadata = json.load(f)

print(metadata)
print(metadata.keys())

# Down-sampling ratio between the video frame rate and the annotation rate:
# one annotation frame covers `chunk_size` video frames.
video_fps = 20
anno_fps = 5

# Fix: check divisibility with integer arithmetic instead of a float division
# followed by an int() round-trip comparison, and abort via SystemExit so the
# process exits nonzero (the bare exit() builtin is only available when the
# `site` module is loaded, and exits with status 0).
if video_fps % anno_fps != 0:
  raise SystemExit("Chunk size is not an integer")

chunk_size = video_fps // anno_fps

unified_transcripts = {}

# For each session, concatenate its per-video transcripts into one list whose
# Start_Frame/End_Frame values are re-based onto a single session-wide frame
# index, then write the merged list to unified_transcripts/<session>.json.
annotation = {}
for session in metadata.keys():
  session_metadata = metadata[session]
  print(session)
  # Number of annotation frames after down-sampling from video fps to anno fps.
  session_total_frames = int(session_metadata['total_frames']) // chunk_size
  # Per-frame target placeholder -- allocated here but not filled in this
  # script (presumably consumed by a later annotation step; verify).
  anno = np.zeros((session_total_frames, 3))
  annotation[session] = {}

  # Transcript utterances for the whole session, in segment order.
  session_transcripts = []

  frame_idx = 0
  for seg in session_metadata['segments']:
    video_name = seg['video_name']
    print(video_name)
    transcript_pattern = os.path.join(transcript_path, '*', session, f"{video_name}.json")
    transcript_file = glob(transcript_pattern)

    # Fix: the original only rejected multiple matches and would crash with an
    # opaque IndexError on zero matches; require exactly one transcript file.
    if len(transcript_file) != 1:
      raise SystemExit(
        f"expected exactly one transcript matching {transcript_pattern}, "
        f"found {len(transcript_file)}")

    transcript_file = transcript_file[0]

    # EasyCom frames start with 1. (of course, there is no frame 0 in video)
    # 11/22) then, shouldn't we start with 0 in annotation, so we need to subtract 1 from the start frame
    # 11/22 we didn't do this

    # Shift this segment's utterances so frame numbers continue from the end
    # of the previous segment instead of restarting at 1.
    with open(transcript_file, 'r') as tf:
      transcript_data = json.load(tf)
    for utt in transcript_data:
      utt['Start_Frame'] += frame_idx
      utt['End_Frame'] += frame_idx
    session_transcripts.extend(transcript_data)

    print(transcript_data[0])

    frame_idx += seg['num_frames']

  # Fix: write the unified transcript once per session after all segments are
  # merged; the original rewrote the (partially filled) file inside the
  # segment loop on every iteration. Also ensure the output directory exists.
  os.makedirs(unified_transcript_path, exist_ok=True)
  with open(os.path.join(unified_transcript_path, f"{session}.json"), 'w') as json_file:
    json.dump(session_transcripts, json_file, indent=4)