# QFVS / preprocess.py
# Segments raw per-frame CLIP features into change-point segments and writes
# zero-padded tensors to ./processed/ for the QFVS pipeline.
import h5py
import torch as t
import numpy as np
from segment import cpd_auto
max_segment_num = 20   # hard cap on segments per video
max_frame_num = 200    # hard cap on frames per segment

# Embedding width per CLIP backbone; the *_norm_avg features keep the model's
# native dimensionality (B/16 and B/32 -> 512, L/14 variants -> 768).
FEATURE_DIMS = {
    "clip_ViT-B16_norm_avg": 512,
    "clip_ViT-B32_norm_avg": 512,
    "clip_ViT-L14_norm_avg": 768,
    "clip_ViT-L14@336px_norm_avg": 768,
}

for kind in ["clip_ViT-B16_norm_avg"]:  # also: "clip_ViT-B32_norm_avg", "clip_ViT-L14_norm_avg", "clip_ViT-L14@336px_norm_avg"
    print(kind)
    for video_id in ["V1", "V2", "V3", "V4"]:
        # Load the (frame_num, feature_dim) feature matrix. Use a context
        # manager: the original leaked this read handle by rebinding `f`
        # to the output file without closing it.
        with h5py.File('./features/' + video_id + '_' + kind + '.h5', 'r') as f:
            feature = f['feature'][()]
        frame_num = feature.shape[0]
        print(frame_num)

        # Kernel temporal segmentation on the frame-similarity Gram matrix.
        K = np.dot(feature, feature.T)
        cps, _ = cpd_auto(K, max_segment_num - 1, 1, desc_rate=1,
                          verbose=False, lmax=max_frame_num - 1)  # int(K.shape[0]/25)
        seg_num = len(cps) + 1
        assert seg_num <= max_segment_num

        # Change points -> boundary list [0, cp1, ..., frame_num] ->
        # per-segment frame-index ranges.
        seg_points = np.insert(cps, 0, 0)
        seg_points = np.append(seg_points, frame_num)
        segments = [np.arange(seg_points[i], seg_points[i + 1], 1, dtype=np.int32)
                    for i in range(seg_num)]
        assert len(segments) <= max_segment_num
        for seg in segments:
            assert len(seg) <= max_frame_num

        # Per-segment frame counts, zero-padded to max_segment_num entries.
        seg_len = np.zeros((max_segment_num), dtype=np.int32)
        for index, seg in enumerate(segments):
            seg_len[index] = len(seg)

        # Sanity check: every segment index must be a valid frame index.
        for seg in segments:
            for frame in seg:
                assert frame < frame_num

        # Original if/elif ladder collapsed to a table; unknown kinds fall
        # back to 768 exactly as the original `else` branch did.
        feature_dim = FEATURE_DIMS.get(kind, 768)

        # Zero-padded tensor (max_segment_num, max_frame_num, feature_dim);
        # unused segment/frame slots stay all-zero.
        features = t.zeros((max_segment_num, max_frame_num, feature_dim))
        for seg_index, seg in enumerate(segments):
            for frame_index, frame in enumerate(seg):
                features[seg_index, frame_index] = t.tensor(feature[frame])
                # features[seg_index,frame_index]=F.avg_pool1d(t.tensor(feature[frame]).unsqueeze(0).unsqueeze(0),kernel_size=2,stride=2)

        with h5py.File('./processed/' + video_id + '_' + kind + '.h5', 'w') as f:
            f.create_dataset('features', data=features)
            f.create_dataset('seg_len', data=seg_len)