Rahulk2197 committed on
Commit
bdd8282
·
verified ·
1 Parent(s): ee94b36

Upload 2 files

Browse files
Files changed (2) hide show
  1. main.py +125 -0
  2. requirements.txt +0 -0
main.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Suppress noisy TensorFlow / absl warnings BEFORE any TF-dependent import below.
import warnings
warnings.filterwarnings('ignore', category=UserWarning, module='tensorflow')
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'    # hide TF C++ INFO and WARNING logs
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'   # disable oneDNN custom ops (avoids numeric-difference warnings)
import logging
logging.getLogger('absl').setLevel(logging.ERROR)
# Project-local pipeline pieces: model registry, face extraction, per-signal detectors.
from functions.models import models_dict
from functions.helper import extract_faces_from_frames
from functions.video import eyebrow,blinks,detect_yawn,detect_smiles
from functions.valence_arousal import va_predict
from functions.fer import fer_predict,plot_graph
from moviepy.editor import VideoFileClip
import json
import pandas as pd
from typing import Callable
from functions.audio import extract_audio_features
# Unpack the shared model registry once at import time so the per-request
# analysis function only does inference, not model loading.
asrmodel=models_dict['asrmodel']            # speech-to-text model
asrproc=models_dict['asrproc']              # ASR processor/tokenizer
sentipipe=models_dict['sentipipe']          # sentiment pipeline for transcripts
valence_arousal_model=models_dict['valence_fer'][1]
val_ar_feat_model=models_dict['valence_fer'][0]
fer_model=models_dict['fer']                # facial-emotion-recognition model
smile_cascade=models_dict['smile_cascade']  # OpenCV Haar cascade for smiles
dnn_net=models_dict['face'][0]              # face detector network
predictor=models_dict['face'][1]            # facial-landmark predictor
# All clips are resampled to this frame rate before per-frame analysis.
fps=30
# In-memory per-session accumulator keyed by uid; populated lazily in
# analyze_live_video. NOTE(review): entries are initialized but the lists are
# never appended to in this file — presumably filled elsewhere; verify.
session_data={}
def analyze_live_video(video_path: str, uid: str, user_id: str, count: int, final: bool, log: Callable[[str], None]):
    """Run the full audio/video analysis pipeline on one recorded answer.

    Extracts audio and frames from ``video_path``, runs face/landmark
    detection, FER, valence/arousal/stress prediction, blink/smile/yawn/
    eyebrow detectors, then writes per-question artifacts under
    ``output/<uid>/<count>/``: ``metadata.json``, ``vas.png`` and ``data.csv``.

    Args:
        video_path: Path to the recorded video file to analyze.
        uid: Session identifier; used as the output folder name and the
            ``session_data`` key.
        user_id: Identifier of the user (only logged here).
        count: Question number; used as the per-question subfolder name.
        final: Whether this is the last question of the session (only logged).
        log: Callback for progress messages. NOTE(review): currently unused —
            progress goes to ``print``; consider routing through ``log``.

    Returns:
        None. All results are written to disk; errors are caught and printed
        (best-effort semantics preserved from the original).
    """
    try:
        global session_data
        # Lazily create the per-session accumulator on first call for this uid.
        if uid not in session_data:
            session_data[uid]={
                "vcount":[],
                "duration":[],

                "audio":[],

                "blinks":[],
                "yawn":[],
                "smile":[],
                "eyebrow":[],

                "fer": [],
                "valence":[],
                "arousal":[],
                "stress":[],
            }
        print(f"UID: {uid}, User ID: {user_id}, Count: {count}, Final: {final}, Video: {video_path}")
        print(f"analysing video for question - {count}")

        # Per-question output layout: output/<uid>/<count>/{metadata.json, vas.png, data.csv}
        output_dir = os.path.join('output', uid)
        os.makedirs(output_dir,exist_ok=True)

        folder_path=os.path.join(output_dir,f'{count}')
        os.makedirs(folder_path,exist_ok=True)
        meta_data_path=os.path.join(folder_path,'metadata.json')
        valence_plot=os.path.join(folder_path,"vas.png")
        df_path=os.path.join(folder_path,'data.csv')

        video_clip=VideoFileClip(video_path)
        try:
            # Normalize frame rate so timestamps (j/fps) line up across detectors.
            video_clip=video_clip.set_fps(fps)
            duration=video_clip.duration
            print(duration)
            audio=video_clip.audio
            if audio is None:
                # A silent clip would otherwise fail with an opaque AttributeError
                # on write_audiofile; raise a clear error instead (still caught below).
                raise ValueError(f"video has no audio track: {video_path}")
            audio_path = os.path.join(folder_path,'extracted_audio.wav')
            print(audio_path)
            audio.write_audiofile(audio_path)
            video_frames=[frame for frame in video_clip.iter_frames()]
            faces, landmarks, sizes=extract_faces_from_frames(video_frames,dnn_net,predictor)

            # Audio branch: ASR transcript features + sentiment.
            af=extract_audio_features(audio_path,asrmodel,asrproc,sentipipe)

            # Visual branch: FER per frame, then valence/arousal/stress from the
            # FER embedding tensors.
            fer_emotions,class_wise_frame_count,em_tensors=fer_predict(faces,fps,fer_model)
            valence_list,arousal_list,stress_list=va_predict(valence_arousal_model,val_ar_feat_model,faces,list(em_tensors))
            timestamps=[j/fps for j in range(len(valence_list))]

            eyebrow_dist=eyebrow(landmarks,sizes)
            print('eyebrow done')
            blink_durations,total_blinks=blinks(landmarks,sizes,fps)
            print('blinks done')
            smiles,smile_count=detect_smiles(faces,smile_cascade)
            print('smiles done')
            yawn,normalized_lip_distances,yawn_count=detect_yawn(landmarks,sizes)
            print('yawn done')  # fixed typo: was 'ywan done'

            # Time-series plot of the four continuous signals.
            y_vals = [valence_list, arousal_list, stress_list,eyebrow_dist]
            labels = ['Valence', 'Arousal', 'Stress',"EyeBrowDistance"]
            plot_graph(timestamps, y_vals, labels, valence_plot)
            print('graph_plotted')

            # Scalar / aggregate results go to metadata.json ...
            meta_data={}
            meta_data['facial_emotion_recognition'] = {
                "class_wise_frame_count": class_wise_frame_count,
            }
            meta_data['audio']=af

            meta_data['blinks']={
                'blink_durations':blink_durations,
                'total_blinks':total_blinks
            }
            meta_data['smile']=smile_count
            meta_data['yawn']=yawn_count
            with open(meta_data_path, 'w') as json_file:
                json.dump(meta_data, json_file, indent=4)
            # ... and the per-frame time series goes to data.csv.
            df=pd.DataFrame(
                {
                    'timestamps':timestamps,
                    'fer': fer_emotions,
                    'valence': valence_list,
                    'arousal': arousal_list,
                    'stress': stress_list,
                    'eyebrow':eyebrow_dist,
                }
            )
            df.to_csv(df_path,index=False)
        finally:
            # Release the ffmpeg reader handles (original leaked them).
            video_clip.close()
    except Exception as e:
        # Best-effort: the caller is not expected to handle analysis failures.
        print("Error analyzing video...: ", e)

# analyze_live_video('s1.mp4','1',1,1,True,print)
125
+
requirements.txt ADDED
Binary file (3.6 kB). View file