"""Annotate a video with speaker-activity predictions loaded from a pickle file.

Pipeline:
  1. Extract the audio track from the original source video.
  2. Decode prediction / ground-truth label vectors from the pickle file.
  3. Burn ground-truth and prediction text overlays into each video frame.
  4. Re-mux the extracted audio onto the annotated (silent) video.

Usage:
    python script.py <annotated_video_out> <final_video_out> <pickle_file> <audio_path>
"""

import pickle
import sys

import cv2
import numpy as np
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.editor import VideoFileClip

# Class names, index-aligned with the multi-hot label vectors in the pickle.
LABELS = ['background', 'wearer speaking start', 'wearer speaking',
          'other person speaking']


def get_label_str(value_list):
    """Decode a list of multi-hot label vectors into human-readable strings.

    Args:
        value_list: iterable of label vectors; each vector is a sequence of
            truthy/falsy values, one entry per class in ``LABELS``.

    Returns:
        list[str]: one decoded label per input vector; when several classes
        are active they are joined with ``' & '``.
    """
    decoded_labels = []
    for label_vec in value_list:
        binary_string = ''.join('1' if v else '0' for v in label_vec)
        active = [LABELS[i] for i, bit in enumerate(binary_string) if bit == '1']
        if len(active) > 1:
            decoded = ' & '.join(active)
            # Debug trace for multi-label frames (kept from the original code).
            print(binary_string, decoded)
        elif active:
            decoded = active[0]
        else:
            # The original code raised IndexError on an all-zero vector.
            # Fall back to the background class instead.
            # NOTE(review): confirm that all-zero really means 'background'.
            decoded = LABELS[0]
        decoded_labels.append(decoded)
    return decoded_labels


def view_pkl(pickle_file):
    """Load model predictions and targets from a pickle and decode them.

    Args:
        pickle_file: path to a pickle holding a dict with keys
            ``'prediction'`` (rows of class scores scaled by 1e4) and
            ``'target'`` (rows of multi-hot ground-truth vectors).

    Returns:
        tuple: (scaled prediction scores, decoded target label strings,
        decoded argmax prediction label strings, raw target list).
    """
    # SECURITY: pickle.load executes arbitrary code -- only open trusted files.
    with open(pickle_file, 'rb') as f:
        data = pickle.load(f)

    predict_list = data['prediction']
    target_list = data['target']
    target_value_list = get_label_str(target_list)

    scaled_rows = []
    argmax_rows = []
    for row in predict_list:
        # Scores are stored scaled by 1e4; convert back to readable floats.
        scaled_rows.append([round(value / 10000, 3) for value in row])
        # One-hot encode the winning class for label decoding.
        one_hot = np.zeros_like(row)
        one_hot[np.argmax(row)] = 1
        argmax_rows.append(one_hot)

    max_value_list = get_label_str(argmax_rows)
    return scaled_rows, target_value_list, max_value_list, target_list


def draw_text(img, text, font=cv2.FONT_HERSHEY_SIMPLEX, pos=(0, 0),
              font_scale=1, font_thickness=2, text_color=(255, 255, 255),
              text_color_bg=(0, 0, 0)):
    """Draw ``text`` on ``img`` over a filled background rectangle.

    Args:
        img: BGR image (modified in place).
        text: string to render.
        font, font_scale, font_thickness: cv2.putText parameters.
        pos: (x, y) top-left corner of the text box.
        text_color: foreground BGR color.
        text_color_bg: background rectangle BGR color.

    Returns:
        (width, height) of the rendered text, as reported by cv2.getTextSize.
    """
    x, y = pos
    text_size, _ = cv2.getTextSize(text, font, font_scale, font_thickness)
    text_w, text_h = text_size
    # Opaque backdrop so the text stays readable over any frame content.
    cv2.rectangle(img, pos, (x + text_w, y + text_h), text_color_bg, -1)
    cv2.putText(img, text, (x, y + text_h + font_scale - 1), font,
                font_scale, text_color, font_thickness)
    return text_size


def video_anno(predict_list, target_list, max_list, input_file_path,
               output_file_path, test_list, start_frame=384, stride=6):
    """Write a copy of the input video with prediction overlays burned in.

    Args:
        predict_list: per-step scaled prediction score rows.
        target_list: per-step decoded ground-truth label strings.
        max_list: per-step decoded argmax prediction label strings.
        input_file_path: source video path.
        output_file_path: annotated output video path (mp4v, silent).
        test_list: unused; kept for interface compatibility.
        start_frame: first frame at which overlays appear (default 384,
            matching the original hard-coded value).
        stride: number of video frames per annotation step (default 6).
    """
    cap = cv2.VideoCapture(input_file_path)
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_file_path, fourcc, fps,
                          (frame_width, frame_height))
    print(len(predict_list))

    frame_number = 0
    anno_num = 0
    sig_num = 0
    # Initialized up front: the original code only bound these inside the
    # sig_num == stride branch and relied on start_frame being a multiple
    # of stride to avoid a NameError on the first annotated frame.
    text_GT = text_PR = text_DT = ''

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame_number += 1
        sig_num += 1

        if anno_num < len(predict_list):
            # Advance to the next annotation row once per `stride` frames,
            # starting at `start_frame`.
            if frame_number >= start_frame and sig_num == stride:
                sig_num = 0
                text_GT = f'Ground Truth: {frame_number} {target_list[anno_num]}'
                text_PR = f'Model Predict: {frame_number} {max_list[anno_num]}'
                text_DT = f'Predict detail: {frame_number} {predict_list[anno_num]}'
                anno_num += 1
            # Before start_frame the counter still wraps every `stride` frames.
            if sig_num > stride - 1:
                sig_num = 0

        if frame_number >= start_frame:
            draw_text(frame, text_GT, pos=(10, 10))
            draw_text(frame, text_PR, pos=(10, 50))
            draw_text(frame, text_DT, pos=(10, 100))
        out.write(frame)

    cap.release()
    out.release()
    cv2.destroyAllWindows()


def main(save, save2, pickle_file, audio_path):
    """Run the full annotate-and-remux pipeline.

    Args:
        save: path for the intermediate annotated (silent) video.
        save2: path for the final video with audio re-attached.
        pickle_file: path to the predictions/targets pickle.
        audio_path: path where the extracted audio track is written.
    """
    # NOTE(review): source video path is hard-coded; parameterize if this
    # script is meant to run on arbitrary inputs.
    video = '/original/video/path.mp4'

    # 1. Extract the audio track from the original video.
    video_clip = VideoFileClip(video)
    audio_clip = video_clip.audio
    audio_clip.write_audiofile(audio_path)

    # 2. Decode predictions and burn overlays into a silent copy.
    predict_list, target_list, max_list, test_list = view_pkl(pickle_file)
    video_anno(predict_list, target_list, max_list, video, save, test_list)

    # 3. Re-attach the extracted audio to the annotated video.
    video_clip = VideoFileClip(save)
    audio_clip = AudioFileClip(audio_path)
    video_clip = video_clip.set_audio(audio_clip)
    video_clip.write_videofile(save2, codec='libx264', audio_codec='aac')
    video_clip.close()
    audio_clip.close()


if __name__ == '__main__':
    main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])