Fayza38 commited on
Commit
4f48da2
·
verified ·
1 Parent(s): a4279dd

Update pipeline.py

Browse files
Files changed (1) hide show
  1. pipeline.py +543 -187
pipeline.py CHANGED
@@ -5,7 +5,9 @@ import json
5
  import math
6
  import torch
7
  import librosa
 
8
  import numpy as np
 
9
  import mediapipe as mp
10
  from PIL import Image
11
  from transformers import AutoImageProcessor, AutoModelForImageClassification, pipeline
@@ -13,299 +15,653 @@ from sentence_transformers import SentenceTransformer, CrossEncoder
13
  from sklearn.metrics.pairwise import cosine_similarity
14
  from mediapipe.tasks import python
15
  from mediapipe.tasks.python import vision
16
- import warnings
17
 
18
- # Suppress unnecessary warnings for a cleaner console output
 
19
  warnings.filterwarnings("ignore", category=UserWarning)
20
  warnings.filterwarnings("ignore", category=FutureWarning)
21
 
22
- # Set device to GPU if available
23
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
24
-
25
- # --- Configuration & Mappings ---
26
-
27
- # Tone Mapping: 0: Hesitant, 1: Confident, 2: Unstable, 3: Natural
28
  TONE_MAPPING = {
29
  "Hesitant": 0,
30
  "Confident": 1,
31
  "Unstable": 2,
32
  "Natural": 3,
33
- "Excited": 3 # Excitement is treated as a high-energy Natural/Positive state
34
- }
35
-
36
- # Emotion Valence-Arousal coordinates for the Emotion Wheel
37
- emotion_va = {
38
- "happy": (0.8, 0.2), "fear": (0.2, 0.8), "angry": (-0.7, 0.65),
39
- "sad": (-0.65, -0.55), "surprise": (0.1, -0.75), "disgust": (0.6, -0.4), "neutral": (0.0, 0.0)
40
  }
41
 
42
- # Static data for drawing the Emotion Ring labels
43
- EMOTION_RING = [
44
- ("Happy", 0, 0.84), ("Surprise", 45, 0.84), ("Fear", 100, 0.84),
45
- ("Sad", 160, 0.84), ("Disgust", 215, 0.84), ("Angry", 270, 0.84)
46
- ]
47
-
48
- # --- Model Initialization ---
49
 
50
- # Download Mediapipe Task file if not exists
51
  MODEL_PATH = "face_landmarker.task"
52
  if not os.path.exists(MODEL_PATH):
53
- print("[INFO] Downloading Mediapipe Face Landmarker model...")
54
  os.system(f"wget -O {MODEL_PATH} -q https://storage.googleapis.com/mediapipe-models/face_landmarker/face_landmarker/float16/1/face_landmarker.task")
55
 
56
- # NLP Models
57
  asr = pipeline("automatic-speech-recognition", model="openai/whisper-small", device=0 if torch.cuda.is_available() else -1)
58
  semantic_model = SentenceTransformer("all-MiniLM-L6-v2")
59
  cross_encoder = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")
60
 
61
- # Visual Emotion Model
62
  FACE_MODEL_NAME = "dima806/facial_emotions_image_detection"
63
  face_processor = AutoImageProcessor.from_pretrained(FACE_MODEL_NAME)
64
  face_model = AutoModelForImageClassification.from_pretrained(FACE_MODEL_NAME).to(device).eval()
65
 
66
- # --- Audio Analysis Modules ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
 
68
  def extract_audio_features(y, sr):
69
- """Calculates physical audio properties like pitch, jitter, and energy."""
70
  duration = librosa.get_duration(y=y, sr=sr)
71
  if duration == 0:
72
  return {"pitch_std": 0, "jitter": 0, "energy_std": 0, "pause_ratio": 0, "speech_rate": 0}
73
 
74
- # Pitch tracking using YIN algorithm
75
  f0 = librosa.yin(y, fmin=75, fmax=300, sr=sr)
76
  f0 = f0[~np.isnan(f0)]
77
  pitch_std = np.std(f0) if len(f0) else 0
78
  jitter = np.mean(np.abs(np.diff(f0)) / np.maximum(f0[:-1], 1e-6)) if len(f0) > 1 else 0
79
 
80
- # Energy tracking (RMS)
81
  rms = librosa.feature.rms(y=y)[0]
82
  energy_std = np.std(rms)
83
 
84
- # Speech vs Pause detection
85
  intervals = librosa.effects.split(y, top_db=20)
86
  speech_duration = sum((e - s) for s, e in intervals) / sr
87
  pause_ratio = 1 - (speech_duration / duration) if duration > 0 else 0
88
 
89
- # Speech rate (Word onsets per second)
90
  oenv = librosa.onset.onset_strength(y=y, sr=sr)
91
  onsets = librosa.onset.onset_detect(onset_envelope=oenv, sr=sr)
92
  speech_rate = len(onsets) / duration if duration > 0 else 0
93
 
94
  return {
95
- "pitch_std": pitch_std, "jitter": jitter, "energy_std": energy_std,
96
- "pause_ratio": pause_ratio, "speech_rate": speech_rate
 
 
 
97
  }
98
 
 
99
  def compute_audio_scores(features, baseline=None):
100
- """Translates raw audio features into behavioral scores and tone IDs."""
 
 
 
101
  if baseline is None:
102
  baseline = {"pitch_std": 30.0, "energy_std": 0.05, "jitter": 0.02, "pause_ratio": 0.2, "speech_rate": 4.0}
103
 
104
- # Relative ratios compared to personal baseline
105
- p_ratio = features["pitch_std"] / max(baseline["pitch_std"], 1e-6)
106
- e_ratio = features["energy_std"] / max(baseline["energy_std"], 1e-6)
107
- r_ratio = features["speech_rate"] / max(baseline["speech_rate"], 1e-6)
 
 
 
 
 
 
 
 
 
 
108
 
109
- # Behavioral Formulas
110
- stress = np.clip((abs(1 - p_ratio) * 0.4 + abs(1 - e_ratio) * 0.4 + features["jitter"] * 0.2) * 150 + 20, 0, 100)
111
- clarity = np.clip(100 - (max(0, features["pause_ratio"] - baseline["pause_ratio"]) * 120 + features["jitter"] * 400), 0, 100)
112
- conf_audio = np.clip(100 - (abs(1 - r_ratio) * 40 + abs(1 - e_ratio) * 30 + features["pause_ratio"] * 50), 0, 100)
113
 
114
- # Tone Classification
115
  tones = {
116
- "Confident": conf_audio,
117
  "Hesitant": features["pause_ratio"] * 150,
 
118
  "Unstable": stress,
119
- "Natural": 100 - (abs(1 - p_ratio) * 60 + abs(1 - r_ratio) * 40)
120
  }
121
- dom_tone_name = max(tones, key=tones.get)
122
-
 
123
  return {
124
- "confidence_audio": round(float(conf_audio), 2),
125
- "clarity": round(float(clarity), 2),
126
- "stress": round(float(stress), 2),
127
  "pauses": round(float(features["pause_ratio"] * 100), 2),
128
- "tone_of_voice": TONE_MAPPING.get(dom_tone_name, 3) # Backend Integer ID
129
  }
130
 
131
- # --- Visual Analysis & Drawing ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
132
 
 
133
  def compute_eye_contact_ratio(frame, landmarks):
134
- """Calculates Eye Aspect Ratio (EAR) to estimate focus/contact."""
135
- if not landmarks: return 0.5
 
 
 
 
 
136
  h, w, _ = frame.shape
137
- def ear(idx):
138
- p = [np.array([landmarks[i].x * w, landmarks[i].y * h]) for i in idx]
139
- return (np.linalg.norm(p[1]-p[5]) + np.linalg.norm(p[2]-p[4])) / (2.0 * np.linalg.norm(p[0]-p[3]))
140
-
141
- avg_ear = (ear([33, 160, 158, 133, 153, 144]) + ear([362, 385, 387, 263, 373, 380])) / 2.0
142
- return min(max(avg_ear * 3, 0), 1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
143
 
144
  def analyze_face_emotion(frame):
145
- """Predicts emotion probabilities using Vision Transformer (ViT)."""
 
 
 
 
146
  rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
147
- img = Image.fromarray(rgb)
148
- inputs = face_processor(images=img, return_tensors="pt").to(device)
 
 
 
149
  with torch.no_grad():
150
  outputs = face_model(**inputs)
 
151
  probs = torch.nn.functional.softmax(outputs.logits, dim=-1)[0]
152
  labels = face_model.config.id2label
153
- return {labels[i].lower(): float(probs[i]) for i in range(len(probs))}
154
 
155
- def draw_face_ui(frame, x, y, w, h, emotion_label):
156
- """Draws professional bounding box and active emotion label."""
 
 
 
 
 
 
 
 
 
 
 
157
  color = (0, 255, 0)
158
- cv2.rectangle(frame, (x, y), (x+w, y+h), color, 1)
159
- # Corners
160
- c_len = 20
161
- for px, py, dx, dy in [(x,y,c_len,0), (x,y,0,c_len), (x+w,y,-c_len,0), (x+w,y,0,c_len), (x,y+h,c_len,0), (x,y+h,0,-c_len), (x+w,y+h,-c_len,0), (x+w,y+h,0,-c_len)]:
 
 
 
 
 
 
 
 
 
 
162
  cv2.line(frame, (px, py), (px+dx, py+dy), color, 4)
163
- cv2.putText(frame, emotion_label.upper(), (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
164
  return frame
165
 
166
- def draw_emotion_wheel(panel, center, radius, valence, arousal, dominant_emo):
167
- """Renders the Valence-Arousal circular UI."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168
  cx, cy = center
169
- cv2.circle(panel, center, radius, (60, 60, 85), 2) # Outer Ring
170
- # Labels
171
- for label, angle, rf in EMOTION_RING:
172
- rad = math.radians(angle)
173
- lx, ly = int(cx + rf*radius*math.cos(rad)), int(cy - rf*radius*math.sin(rad))
174
- color = (0, 255, 200) if label.lower() == dominant_emo else (180, 180, 180)
175
- cv2.putText(panel, label, (lx-20, ly), cv2.FONT_HERSHEY_SIMPLEX, 0.35, color, 1)
176
- # Animated Dot
177
- dx, dy = int(cx + valence*radius*0.8), int(cy - arousal*radius*0.8)
178
- cv2.circle(panel, (dx, dy), 8, (255, 230, 60), -1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
179
  return panel
180
 
181
- def draw_metric_bars(frame, x, y, confidence, clarity, stress):
182
- """Renders horizontal performance bars."""
183
- metrics = [("Confidence", confidence, (70, 180, 255)), ("Clarity", clarity, (100, 220, 150)), ("Stress", stress, (255, 120, 100))]
184
- for i, (label, val, col) in enumerate(metrics):
185
- curr_y = y + i * 40
186
- cv2.putText(frame, label, (x, curr_y-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1)
187
- cv2.rectangle(frame, (x, curr_y), (x+200, curr_y+15), (50,50,50), -1) # BG
188
- cv2.rectangle(frame, (x, curr_y), (x+int(val*2), curr_y+15), col, -1) # Fill
189
- return frame
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
190
 
191
- # --- Core Video Processing ---
 
 
192
 
193
- def process_video_segment(video_path, output_dir, q_id, audio_scores):
194
- """Analyzes visual data and generates annotated video."""
195
  base_options = python.BaseOptions(model_asset_path=MODEL_PATH)
196
- options = vision.FaceLandmarkerOptions(base_options=base_options, running_mode=vision.RunningMode.VIDEO)
197
-
198
  cap = cv2.VideoCapture(video_path)
199
- fps = cap.get(cv2.CAP_PROP_FPS)
200
- w, h = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
201
-
202
- temp_v = os.path.join(output_dir, f"annotated_{q_id}.mp4")
203
- out = cv2.VideoWriter(temp_v, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
204
-
205
- frame_idx, face_accum, eye_accum = 0, [], []
206
- s_v, s_a, dom_emo = 0.0, 0.0, "neutral"
 
207
 
 
 
 
 
 
 
 
 
 
 
 
 
208
  with vision.FaceLandmarker.create_from_options(options) as landmarker:
209
  while cap.isOpened():
210
  ret, frame = cap.read()
211
- if not ret: break
212
-
213
- # Optimization: Run AI every 3 frames
214
- if frame_idx % 3 == 0:
215
- mp_img = mp.Image(image_format=mp.ImageFormat.SRGB, data=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
216
- res = landmarker.detect_for_video(mp_img, int((frame_idx/fps)*1000))
 
217
 
218
- if res.face_landmarks:
219
- lm = res.face_landmarks[0]
220
- emotions = analyze_face_emotion(frame)
221
- eye_s = compute_eye_contact_ratio(frame, lm)
222
-
223
- # Statistical accumulation
224
- face_accum.append((emotions.get("neutral", 0) + emotions.get("happy", 0)) * 100)
225
- eye_accum.append(eye_s)
226
-
227
- dom_emo = max(emotions, key=emotions.get)
228
- v_t = sum(emotion_va[e][0]*s for e,s in emotions.items() if e in emotion_va)
229
- a_t = sum(emotion_va[e][1]*s for e,s in emotions.items() if e in emotion_va)
230
-
231
- # Moving average for smooth UI animation
232
- s_v += 0.2 * (v_t - s_v)
233
- s_a += 0.2 * (a_t - s_a)
234
-
235
- # Draw Bounding Box
236
- xs, ys = [l.x*w for l in lm], [l.y*h for l in lm]
237
- draw_face_ui(frame, int(min(xs)), int(min(ys)), int(max(xs)-min(xs)), int(max(ys)-min(ys)), dom_emo)
238
 
239
- # UI Overlays
240
- draw_emotion_wheel(frame, (w-100, h-100), 70, s_v, s_a, dom_emo)
241
- draw_metric_bars(frame, 30, h-120, audio_scores["confidence_audio"], audio_scores["clarity"], audio_scores["stress"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
242
 
243
  out.write(frame)
244
  frame_idx += 1
245
-
246
  cap.release()
247
  out.release()
248
- return temp_v, np.mean(face_accum) if face_accum else 50, np.mean(eye_accum)*100 if eye_accum else 50
249
-
250
- # --- Main Entry Point ---
251
 
 
252
  def run_intervision_pipeline(video_path, questions_config, output_dir):
253
- """Main pipeline execution for all interview questions."""
 
 
254
  os.makedirs(output_dir, exist_ok=True)
255
-
256
- # Baseline for fairness (first 10s)
257
  try:
258
- yb, srb = librosa.load(video_path, sr=16000, duration=10)
259
- baseline = extract_audio_features(yb, srb)
260
- except: baseline = None
 
 
261
 
262
- final_reports, video_segments = [], []
263
 
264
  for q in questions_config:
265
  q_id = q['question_id']
266
  raw_seg = os.path.join(output_dir, f"q{q_id}_raw.mp4")
267
-
268
- # Clip segment using FFmpeg
269
- dur = q["end_time"] - q["start_time"]
270
- subprocess.run(['ffmpeg', '-y', '-ss', str(q["start_time"]), '-t', str(dur), '-i', video_path, '-c', 'copy', raw_seg], quiet=True)
271
-
272
- # Audio Processing
273
- y, sr = librosa.load(raw_seg, sr=16000)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
274
  a_scores = compute_audio_scores(extract_audio_features(y, sr), baseline)
275
-
276
- # NLP Analysis
277
- transcription = asr(raw_seg)["text"].strip()
278
- sim_score = round(float(cosine_similarity(semantic_model.encode([transcription, q["ideal_answer"]]))[0][0]*100), 2)
279
- rel_score = round(float(1/(1+np.exp(-cross_encoder.predict([(q["question_text"], transcription)])[0]))*100), 2)
280
-
 
 
 
 
 
281
  # Visual Analysis
282
- ann_v, f_conf, e_conf = process_video_segment(raw_seg, output_dir, q_id, a_scores)
283
- final_v = os.path.join(output_dir, f"q{q_id}_final.mp4")
284
- subprocess.run(['ffmpeg', '-y', '-i', ann_v, '-i', raw_seg, '-map', '0:v', '-map', '1:a', '-c:v', 'copy', '-c:a', 'aac', final_v], quiet=True)
285
-
286
- video_segments.append(final_v)
287
- final_reports.append({
288
- "questionId": q_id,
289
- "userAnswerText": transcription,
290
- "toneOfVoice": a_scores["tone_of_voice"], # Integer ID (0-3)
291
- "clarity": a_scores["clarity"],
292
- "stress": a_scores["stress"],
293
- "confidence": round((a_scores["confidence_audio"] + f_conf + e_conf) / 3, 2),
294
- "pauses": a_scores["pauses"],
295
- "score": sim_score,
296
- "relevance": rel_score
297
- })
298
-
299
- # Final Concatenation
300
- if video_segments:
301
- list_p = os.path.join(output_dir, "list.txt")
302
- with open(list_p, "w") as f:
303
- for s in video_segments: f.write(f"file '{os.path.abspath(s)}'\n")
304
-
305
- final_out = os.path.join(output_dir, "Intervision_Final_Result.mp4")
306
- os.system(f"ffmpeg -f concat -safe 0 -i {list_p} -c copy -y {final_out}")
307
-
 
 
 
 
 
 
 
 
 
 
 
308
  with open(os.path.join(output_dir, "report.json"), "w") as f:
309
  json.dump({"listOfAnswerReport": final_reports}, f, indent=4)
310
-
311
- return "Pipeline Finished Successfully"
 
 
 
5
  import math
6
  import torch
7
  import librosa
8
+ import ffmpeg
9
  import numpy as np
10
+ import soundfile as sf
11
  import mediapipe as mp
12
  from PIL import Image
13
  from transformers import AutoImageProcessor, AutoModelForImageClassification, pipeline
 
15
  from sklearn.metrics.pairwise import cosine_similarity
16
  from mediapipe.tasks import python
17
  from mediapipe.tasks.python import vision
 
18
 
19
+ # Ignore unnecessary warnings
20
+ import warnings
21
  warnings.filterwarnings("ignore", category=UserWarning)
22
  warnings.filterwarnings("ignore", category=FutureWarning)
23
 
 
 
 
 
 
 
24
  TONE_MAPPING = {
25
  "Hesitant": 0,
26
  "Confident": 1,
27
  "Unstable": 2,
28
  "Natural": 3,
29
+ "Excited": 3
 
 
 
 
 
 
30
  }
31
 
32
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 
 
 
 
 
33
 
34
+ # 2. Download and Initialize Mediapipe once (Global)
35
  MODEL_PATH = "face_landmarker.task"
36
  if not os.path.exists(MODEL_PATH):
 
37
  os.system(f"wget -O {MODEL_PATH} -q https://storage.googleapis.com/mediapipe-models/face_landmarker/face_landmarker/float16/1/face_landmarker.task")
38
 
39
+ # 3. Initialize Models
40
  asr = pipeline("automatic-speech-recognition", model="openai/whisper-small", device=0 if torch.cuda.is_available() else -1)
41
  semantic_model = SentenceTransformer("all-MiniLM-L6-v2")
42
  cross_encoder = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")
43
 
 
44
  FACE_MODEL_NAME = "dima806/facial_emotions_image_detection"
45
  face_processor = AutoImageProcessor.from_pretrained(FACE_MODEL_NAME)
46
  face_model = AutoModelForImageClassification.from_pretrained(FACE_MODEL_NAME).to(device).eval()
47
 
48
+ # Emotion Mapping for Wheel
49
+ emotion_va = {
50
+ "happy": (0.8, 0.2), "fear": (0.2, 0.8), "angry": (-0.7, 0.65),
51
+ "sad": (-0.65, -0.55), "surprise": (0.1, -0.75), "disgust": (0.6, -0.4), "neutral": (0.0, 0.0)
52
+ }
53
+ EMOTION_RING = [
54
+ ("Happy", 0, 0.84), ("Surprise", 45, 0.84), ("Fear", 100, 0.84),
55
+ ("Sad", 160, 0.84), ("Disgust", 215, 0.84), ("Angry", 270, 0.84)
56
+ ]
57
+
58
+ ##Utility functions
59
+
60
def normalize(v, mn, mx):
    """Linearly scale v into [0, 1] over [mn, mx]; return 0.0 for a degenerate range."""
    span = mx - mn
    if span == 0:
        return 0.0
    return np.clip((v - mn) / span, 0, 1)
62
+
63
def extract_audio(v_in, a_out):
    """Extract the audio track of v_in into a_out as mono 16 kHz, overwriting any existing file."""
    stream = ffmpeg.input(v_in)
    stream = stream.output(a_out, ac=1, ar=16000)
    stream.overwrite_output().run(quiet=True)
65
+
66
def merge_audio_video(v_in, a_in, v_out):
    """Mux the video stream of v_in with the audio stream of a_in into v_out (H.264 video, AAC audio)."""
    video_stream = ffmpeg.input(v_in).video
    audio_stream = ffmpeg.input(a_in).audio
    joined = ffmpeg.output(video_stream, audio_stream, v_out, vcodec="libx264", acodec="aac")
    joined.overwrite_output().run(quiet=True)
68
+
69
def draw_face_box(frame, x, y, w, h, emotion_name=""):
    """Draw a thin green face box with corner accents and the emotion name above it.

    NOTE(review): this definition is shadowed by a later redefinition of
    `draw_face_box` further down the file; only the later one is ever called.
    Consider deleting one of the two.
    """
    color, th, cl = (0, 255, 100), 2, 20  # box color (BGR), thickness (unused here), corner length
    cv2.rectangle(frame, (x, y), (x+w, y+h), color, 1)

    # Add emotion name above face box
    if emotion_name:
        cv2.putText(
            frame,
            emotion_name.upper(),
            (x + 10, y - 15),
            cv2.FONT_HERSHEY_DUPLEX,
            0.7,
            (0, 255, 100),
            2,
            cv2.LINE_AA
        )

    # Corner accents: two short lines meeting at each of the four corners.
    for px, py, dx, dy in [(x,y,cl,0), (x,y,0,cl), (x+w,y,-cl,0), (x+w,y,0,cl), (x,y+h,cl,0), (x,y+h,0,-cl), (x+w,y+h,-cl,0), (x+w,y+h,0,-cl)]:
        cv2.line(frame, (px, py), (px+dx, py+dy), color, 5)
    return frame
90
+
91
def compute_eye_contact_ratio(frame, landmarks):
    """Estimate eye openness/contact in [0, 1] from Mediapipe face landmarks via the eye aspect ratio (EAR).

    NOTE(review): shadowed by a later redefinition of the same name which
    additionally returns 0.5 for empty `landmarks`; this version assumes a
    non-empty landmark list.
    """
    h, w, _ = frame.shape
    def ear(idx):
        # p0/p3 are the horizontal eye corners; p1/p5 and p2/p4 the lid pairs.
        p = [np.array([landmarks[i].x * w, landmarks[i].y * h]) for i in idx]
        return (np.linalg.norm(p[1]-p[5]) + np.linalg.norm(p[2]-p[4])) / (2.0 * np.linalg.norm(p[0]-p[3]))
    # Average EAR of both eyes (Mediapipe FaceMesh indices), scaled and clamped.
    avg_ear = (ear([33, 160, 158, 133, 153, 144]) + ear([362, 385, 387, 263, 373, 380])) / 2.0
    return min(max(avg_ear * 3, 0), 1)
98
+
99
def analyze_face_emotion(frame):
    """Return {emotion_label: probability} for a single BGR frame via the ViT classifier.

    NOTE(review): shadowed by a later redefinition of the same name with
    identical behavior; one of the two should be removed.
    """
    # OpenCV frames are BGR; the PIL/transformers stack expects RGB.
    img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    inputs = face_processor(images=img, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = face_model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)[0]
    return {face_model.config.id2label[i].lower(): float(probs[i]) for i in range(len(probs))}
106
+
107
+ ##Audio analysis
108
 
109
def extract_audio_features(y, sr):
    """Compute prosodic features from a waveform.

    Returns a dict with pitch variability, jitter, energy variability,
    pause ratio and speech rate; all zeros for an empty clip.
    """
    total_dur = librosa.get_duration(y=y, sr=sr)
    if total_dur == 0:
        return {"pitch_std": 0, "jitter": 0, "energy_std": 0, "pause_ratio": 0, "speech_rate": 0}

    # Fundamental-frequency track (YIN), with NaN frames dropped.
    f0_track = librosa.yin(y, fmin=75, fmax=300, sr=sr)
    f0_track = f0_track[~np.isnan(f0_track)]
    pitch_std = np.std(f0_track) if len(f0_track) else 0
    if len(f0_track) > 1:
        # Mean relative frame-to-frame pitch change.
        jitter = np.mean(np.abs(np.diff(f0_track)) / np.maximum(f0_track[:-1], 1e-6))
    else:
        jitter = 0

    # Frame-level loudness variability.
    energy_std = np.std(librosa.feature.rms(y=y)[0])

    # Fraction of the clip that is non-speech (below the 20 dB split threshold).
    voiced = librosa.effects.split(y, top_db=20)
    voiced_dur = sum(end - start for start, end in voiced) / sr
    pause_ratio = 1 - (voiced_dur / total_dur) if total_dur > 0 else 0

    # Onsets per second as a proxy for speech rate.
    strength = librosa.onset.onset_strength(y=y, sr=sr)
    onset_count = len(librosa.onset.onset_detect(onset_envelope=strength, sr=sr))
    speech_rate = onset_count / total_dur if total_dur > 0 else 0

    return {
        "pitch_std": pitch_std,
        "jitter": jitter,
        "energy_std": energy_std,
        "pause_ratio": pause_ratio,
        "speech_rate": speech_rate,
    }
140
 
141
+
142
def compute_audio_scores(features, baseline=None):
    """Fairness-aware audio scoring relative to a personal baseline.

    When no baseline is supplied, population-level defaults are used.
    Returns clipped 0-100 scores plus the dominant tone's integer ID.
    """
    if baseline is None:
        baseline = {"pitch_std": 30.0, "energy_std": 0.05, "jitter": 0.02,
                    "pause_ratio": 0.2, "speech_rate": 4.0}

    # Current-to-baseline ratios (guarded against a zero baseline).
    pitch_ratio = features["pitch_std"] / max(baseline["pitch_std"], 1e-6)
    energy_ratio = features["energy_std"] / max(baseline["energy_std"], 1e-6)
    rate_ratio = features["speech_rate"] / max(baseline["speech_rate"], 1e-6)

    pitch_dev = abs(1 - pitch_ratio)
    energy_dev = abs(1 - energy_ratio)
    rate_dev = abs(1 - rate_ratio)

    # Stress grows with pitch/energy deviation and jitter (floor of 20).
    stress = np.clip((pitch_dev * 0.4 + energy_dev * 0.4 + features["jitter"] * 0.2) * 150 + 20, 0, 100)

    # Clarity is penalized by pausing beyond baseline and by jitter.
    pause_dev = max(0, features["pause_ratio"] - baseline["pause_ratio"])
    clarity = 100 - (pause_dev * 120 + features["jitter"] * 400)

    # Confidence is penalized by rate/energy deviation and pausing.
    confidence_audio = 100 - (rate_dev * 40 + energy_dev * 30 + features["pause_ratio"] * 50)

    # Tone classification from the relative shifts; highest score wins.
    tone_scores = {
        "Confident": confidence_audio,
        "Hesitant": features["pause_ratio"] * 150,
        "Excited": (energy_ratio - 1) * 100 if energy_ratio > 1 else 0,
        "Unstable": stress,
        "Natural": 100 - (pitch_dev * 60 + rate_dev * 40),
    }
    dominant_tone = max(tone_scores, key=tone_scores.get)

    return {
        "confidence_audio": round(float(np.clip(confidence_audio, 0, 100)), 2),
        "clarity": round(float(np.clip(clarity, 0, 100)), 2),
        "stress": round(float(np.clip(stress, 0, 100)), 2),
        "pauses": round(float(features["pause_ratio"] * 100), 2),
        "tone_of_voice": TONE_MAPPING.get(dominant_tone, 3),
    }
187
 
188
def analyze_audio_segment(audio_path, baseline=None):
    """Load an audio file at 16 kHz and return its behavioral audio scores."""
    waveform, rate = librosa.load(audio_path, sr=16000)
    return compute_audio_scores(extract_audio_features(waveform, rate), baseline)
195
+
196
+
197
+ ##Text analysis
198
+
199
def get_user_answer(audio_path):
    """Transcribe an audio file with the Whisper ASR pipeline (20 s chunks)."""
    return asr(audio_path, chunk_length_s=20)["text"].strip()
203
+
204
+
205
def compute_similarity_score(user_answer, ideal_answer):
    """Semantic similarity (0-100) between the user's answer and the ideal answer."""
    user_emb, ideal_emb = semantic_model.encode([user_answer, ideal_answer])
    similarity = cosine_similarity([user_emb], [ideal_emb])[0][0]
    return round(max(0, float(similarity * 100)), 2)
210
+
211
def compute_relevance_score(question, user_answer):
    """Cross-encoder relevance of the answer to the question, sigmoid-squashed to 0-100."""
    logit = cross_encoder.predict([(question, user_answer)])[0]
    probability = 1 / (1 + np.exp(-logit))
    return round(max(0, float(probability * 100)), 2)
216
+
217
+ ##Video
218
+
219
+ # Eye indices
220
+ LEFT_EYE = [33, 160, 158, 133, 153, 144]
221
+ RIGHT_EYE = [362, 385, 387, 263, 373, 380]
222
 
223
+ # Eye Contact Function
224
def compute_eye_contact_ratio(frame, landmarks):
    """Estimate eye contact/openness in [0, 1] from Mediapipe face landmarks.

    Averages the eye aspect ratio (EAR) of both eyes, scales by 3 and clamps
    to [0, 1]. Returns a neutral 0.5 when no landmarks were detected.
    """
    if not landmarks:
        return 0.5

    frame_h, frame_w, _ = frame.shape

    def aspect_ratio(indices):
        # indices[0]/[3] are the horizontal eye corners; [1]/[5] and [2]/[4]
        # are the upper/lower eyelid point pairs.
        pts = [np.array([landmarks[i].x * frame_w, landmarks[i].y * frame_h]) for i in indices]
        vertical = np.linalg.norm(pts[1] - pts[5]) + np.linalg.norm(pts[2] - pts[4])
        horizontal = np.linalg.norm(pts[0] - pts[3])
        return vertical / (2.0 * horizontal)

    # Mediapipe FaceMesh contour indices of the left and right eye.
    mean_ear = (aspect_ratio([33, 160, 158, 133, 153, 144]) +
                aspect_ratio([362, 385, 387, 263, 373, 380])) / 2.0
    return min(max(mean_ear * 3, 0), 1)
257
 
258
def analyze_face_emotion(frame):
    """Return {emotion_name: probability} for a single BGR frame via the ViT classifier."""
    # OpenCV frames are BGR; the PIL/transformers stack expects RGB.
    pil_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    model_inputs = face_processor(images=pil_image, return_tensors="pt").to(device)

    with torch.no_grad():
        logits = face_model(**model_inputs).logits

    scores = torch.nn.functional.softmax(logits, dim=-1)[0]
    id2label = face_model.config.id2label
    return {id2label[i].lower(): float(p) for i, p in enumerate(scores)}
282
+
283
def draw_face_box(frame, x, y, w, h, emotion_label="Neutral"):
    """Draw a green face bounding box with corner accents and a centered emotion label."""
    box_color = (0, 255, 0)

    # Main rectangle.
    cv2.rectangle(frame, (x, y), (x + w, y + h), box_color, 2)

    # Decorative corner accents: two short lines meeting at each corner.
    c = 22
    corners = [
        (x, y, c, 0), (x, y, 0, c),
        (x + w, y, -c, 0), (x + w, y, 0, c),
        (x, y + h, c, 0), (x, y + h, 0, -c),
        (x + w, y + h, -c, 0), (x + w, y + h, 0, -c),
    ]
    for start_x, start_y, dx, dy in corners:
        cv2.line(frame, (start_x, start_y), (start_x + dx, start_y + dy), box_color, 4)

    # Center the capitalized label horizontally just above the box.
    text = emotion_label.capitalize()
    (text_w, text_h), _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
    cv2.putText(frame, text, (x + (w - text_w) // 2, y - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2, cv2.LINE_AA)

    return frame
331
 
332
def compute_valence_arousal_from_probs(emotion_probs):
    """Probability-weighted mean (valence, arousal) over emotions known to emotion_va.

    Returns (0.0, 0.0) when none of the input emotions are in the mapping.
    """
    weighted_v = weighted_a = weight_sum = 0.0
    for name, prob in emotion_probs.items():
        coords = emotion_va.get(name.lower())
        if coords is None:
            continue
        weighted_v += coords[0] * prob
        weighted_a += coords[1] * prob
        weight_sum += prob

    if weight_sum == 0:
        return 0.0, 0.0
    return weighted_v / weight_sum, weighted_a / weight_sum
347
+
348
def draw_full_emotion_wheel(panel, center, radius, valence, arousal,
                            dominant_emotion="neutral"):
    """Render the circular valence-arousal emotion wheel onto `panel`.

    Draws concentric guide rings, sector dividers, emotion labels from
    EMOTION_RING (the dominant one highlighted), a "Neutral" label in the
    center, and an animated glowing dot positioned by (valence, arousal)
    in [-1, 1] coordinates (positive valence right, positive arousal up).
    Returns the modified panel.
    """
    cx, cy = center

    # Filled background disk, outer ring, and two inner guide rings.
    cv2.circle(panel, center, radius + 5, (15, 15, 25), -1)
    cv2.circle(panel, center, radius, (60, 60, 85), 2)
    for rf in [0.33, 0.66]:
        cv2.circle(panel, center, int(radius * rf), (35, 35, 50), 1)

    # Dividing lines between the six emotion sectors (every 60 degrees).
    for angle_deg in range(0, 360, 60):
        rad = math.radians(angle_deg)
        x1 = int(cx + radius * math.cos(rad))
        y1 = int(cy - radius * math.sin(rad))
        cv2.line(panel, (cx, cy), (x1, y1), (40, 40, 60), 1)

    # Emotion labels around the ring.
    ef, es, et = cv2.FONT_HERSHEY_SIMPLEX, 0.40, 1
    for emotion_data in EMOTION_RING:
        # NOTE(review): EMOTION_RING entries always carry a numeric angle,
        # so this guard appears to be dead code — confirm before removing.
        if emotion_data[1] is None:
            continue

        label, angle_deg, rf = emotion_data
        rad = math.radians(angle_deg)
        lx = int(cx + rf * radius * math.cos(rad))
        ly = int(cy - rf * radius * math.sin(rad))
        (tw, th), _ = cv2.getTextSize(label, ef, es, et)
        tx, ty = lx - tw//2, ly + th//2

        # Highlight the active (dominant) emotion with a bigger, colored label.
        if label.lower() == dominant_emotion.lower():
            cv2.putText(panel, label, (tx, ty), ef, es+0.08, (0, 255, 200), 2, cv2.LINE_AA)
        else:
            cv2.putText(panel, label, (tx, ty), ef, es, (190, 190, 255), et, cv2.LINE_AA)

    # "Neutral" sits at the wheel's center; highlighted when dominant.
    nc = (0, 255, 200) if dominant_emotion == "neutral" else (160, 160, 160)
    (tw, th), _ = cv2.getTextSize("Neutral", ef, es, et)
    cv2.putText(panel, "Neutral", (cx-tw//2, cy+th//2), ef, es, nc, et, cv2.LINE_AA)

    # Animated dot with a glow effect: three concentric filled circles.
    dot_x = int(cx + valence * radius * 0.88)
    dot_y = int(cy - arousal * radius * 0.88)
    cv2.circle(panel, (dot_x, dot_y), 15, (160, 120, 0), -1)
    cv2.circle(panel, (dot_x, dot_y), 11, (220, 180, 0), -1)
    cv2.circle(panel, (dot_x, dot_y), 7, (255, 230, 60), -1)

    return panel
397
 
398
# (label, fill color, background color) in BGR, one entry per metric, in draw order.
BAR_CONFIGS = [
    ("Confidence", (70, 180, 255), (30, 50, 100)),
    ("Clarity", (100, 220, 150), (25, 70, 50)),
    ("Stress", (255, 120, 100), (100, 40, 30)),
]

def draw_metric_bars(panel,
                     bars_x_start,
                     bar_y_top,
                     bar_height,
                     bar_width,
                     bar_gap,
                     confidence,
                     clarity,
                     stress):
    """Draw horizontal Confidence/Clarity/Stress bars with labels and percentages.

    Each metric value is expected in [0, 100]; the filled width is
    value/100 of `bar_width`. Labels come from BAR_CONFIGS (whose order
    must match the confidence, clarity, stress argument order).
    Returns the modified panel.
    """
    # Fix: removed the unused `labels_list` local — labels are taken from
    # BAR_CONFIGS, which is the single source of truth for the draw order.
    values = [confidence, clarity, stress]
    label_space = 20  # vertical room reserved for the label above each bar

    for i, value in enumerate(values):
        label, fill_color, bg_color = BAR_CONFIGS[i]

        # Each bar block occupies: label space + bar + gap.
        y = bar_y_top + i * (bar_height + label_space + bar_gap)
        x_right = bars_x_start + bar_width
        filled = int((value / 100) * bar_width)

        # Label above the bar.
        cv2.putText(
            panel,
            label,
            (bars_x_start, y),
            cv2.FONT_HERSHEY_DUPLEX,
            0.6,
            (230, 230, 230),
            1,
            cv2.LINE_AA
        )

        # Shift the bar down slightly to clear the label.
        bar_y = y + 8

        # Background track, then the filled portion.
        cv2.rectangle(
            panel,
            (bars_x_start, bar_y),
            (x_right, bar_y + bar_height),
            bg_color,
            -1
        )
        cv2.rectangle(
            panel,
            (bars_x_start, bar_y),
            (bars_x_start + filled, bar_y + bar_height),
            fill_color,
            -1
        )

        # Percentage readout inside the bar.
        cv2.putText(
            panel,
            f"{int(value)}%",
            (bars_x_start + 12, bar_y + bar_height - 6),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.6,
            (255, 255, 255),
            2,
            cv2.LINE_AA
        )

    return panel
480
+
481
+ ##Integrated Video Processing (Analysis + Annotation)
482
 
483
def process_video_segment(video_path, output_dir, segment_id, audio_scores_global=None):
    """Annotate one video segment with face/emotion overlays and metric bars.

    Runs Mediapipe face landmarking plus emotion/eye-contact analysis every
    ``frame_stride`` frames (reusing the last results in between for speed),
    blends the per-frame facial scores with the segment's audio scores, and
    writes an annotated copy of the video.

    Args:
        video_path: Path of the raw segment to annotate.
        output_dir: Directory where the annotated video is written.
        segment_id: Identifier used in the output file name.
        audio_scores_global: Optional dict with ``confidence_audio``,
            ``clarity`` and ``stress`` keys (0-100 scale). Missing keys fall
            back to neutral defaults.

    Returns:
        Tuple of ``(annotated_video_path, mean_face_confidence,
        mean_eye_contact_pct)``. Both means default to 50 when no face was
        ever detected.
    """
    # Fix: the original dereferenced audio_scores_global even when the
    # default None was passed, raising AttributeError. Treat None as {}.
    if audio_scores_global is None:
        audio_scores_global = {}

    base_options = python.BaseOptions(model_asset_path=MODEL_PATH)
    options = vision.FaceLandmarkerOptions(
        base_options=base_options,
        running_mode=vision.RunningMode.VIDEO,
        num_faces=1,
    )

    cap = cv2.VideoCapture(video_path)
    # Guard against containers reporting 0 fps (would divide by zero below).
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    temp_video = os.path.join(output_dir, f"temp_annotated_{segment_id}.mp4")
    # NOTE(review): XVID inside an .mp4 container is unusual; 'avc1'/'H264'
    # would be more web-friendly if this OpenCV build supports it — confirm.
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(temp_video, fourcc, fps, (width, height))

    face_conf_accum, eye_accum, frame_idx = [], [], 0
    smooth_v, smooth_a, dom_emo = 0.0, 0.0, "neutral"

    # --- Optimization: run the heavy AI only every `frame_stride` frames
    # and reuse the last known results for the frames in between. ---
    frame_stride = 3
    last_results = None
    last_emotions = None
    last_eye_s = 0.5
    last_lm = None

    # Audio-derived baselines used whenever no face data is available.
    b_conf = audio_scores_global.get("confidence_audio", 50)
    b_clar = audio_scores_global.get("clarity", 50)
    b_stress = audio_scores_global.get("stress", 20)

    with vision.FaceLandmarker.create_from_options(options) as landmarker:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            # 1. Run the heavy AI only on stride frames.
            if frame_idx % frame_stride == 0:
                mp_image = mp.Image(
                    image_format=mp.ImageFormat.SRGB,
                    data=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB),
                )
                last_results = landmarker.detect_for_video(
                    mp_image, int((frame_idx / fps) * 1000)
                )
                if last_results.face_landmarks:
                    last_lm = last_results.face_landmarks[0]
                    last_emotions = analyze_face_emotion(frame)
                    last_eye_s = compute_eye_contact_ratio(frame, last_lm)

            # 2. Blend the audio baselines with the last known facial data.
            d_conf, d_clar, d_stress = b_conf, b_clar, b_stress

            if last_results and last_results.face_landmarks:
                curr_f_conf = (last_emotions.get("neutral", 0)
                               + last_emotions.get("happy", 0)) * 100
                d_conf = (b_conf * 0.7) + (curr_f_conf * 0.3)
                d_clar = (b_clar * 0.8) + (last_eye_s * 100 * 0.2)
                d_stress = (b_stress * 0.7) + (
                    (last_emotions.get("sad", 0) + last_emotions.get("angry", 0)) * 30
                )

                # Update accumulators only on stride frames so the averages
                # reflect distinct AI runs rather than reused frames.
                if frame_idx % frame_stride == 0:
                    face_conf_accum.append(curr_f_conf)
                    eye_accum.append(last_eye_s)

                dom_emo = max(last_emotions, key=last_emotions.get)
                v_t = sum(emotion_va[e][0] * s for e, s in last_emotions.items()
                          if e in emotion_va)
                a_t = sum(emotion_va[e][1] * s for e, s in last_emotions.items()
                          if e in emotion_va)

                # Keep smoothing every frame for fluid pointer movement.
                smooth_v += 0.15 * (v_t - smooth_v)
                smooth_a += 0.15 * (a_t - smooth_a)

                # Draw the face box using the last known landmarks.
                xs = [l.x * width for l in last_lm]
                ys = [l.y * height for l in last_lm]
                draw_face_box(
                    frame,
                    int(min(xs)), int(min(ys)),
                    int(max(xs) - min(xs)), int(max(ys) - min(ys)),
                    dom_emo,
                )

            # 3. Always draw the UI (emotion wheel and metric bars).
            frame = draw_full_emotion_wheel(
                frame, (width - 130, height - 100), 90, smooth_v, smooth_a, dom_emo
            )
            frame = draw_metric_bars(
                frame, 30, height - 160, 28, 200, 6, d_conf, d_clar, d_stress
            )

            out.write(frame)
            frame_idx += 1

    cap.release()
    out.release()
    return (
        temp_video,
        np.mean(face_conf_accum) if face_conf_accum else 50,
        np.mean(eye_accum) * 100 if eye_accum else 50,
    )
 
 
568
 
569
# --- Main pipeline ---
570
def run_intervision_pipeline(video_path, questions_config, output_dir):
    """Run the full Intervision analysis over a recorded interview.

    For each configured question, the matching time range is cut from the
    source video with FFmpeg, analyzed for audio metrics, transcribed with
    Whisper, scored against the ideal answer, and visually annotated. All
    annotated segments are concatenated into one result video and a JSON
    report is written alongside it.

    Args:
        video_path: Path to the full interview recording.
        questions_config: Iterable of dicts with ``question_id``,
            ``question_text``, ``ideal_answer``, ``start_time`` and
            ``end_time`` keys.
        output_dir: Directory for intermediate and final outputs
            (created if missing).

    Returns:
        A human-readable status string describing the outcome.
    """
    if not os.path.exists(video_path):
        return f"Error: Video file not found at {video_path}"

    os.makedirs(output_dir, exist_ok=True)

    # Establish an audio baseline from the first 10 seconds.
    try:
        y_b, sr_b = librosa.load(video_path, sr=16000, duration=10)
        baseline = extract_audio_features(y_b, sr_b)
    except Exception as e:
        print(f"Baseline Load Warning: {e}. Using defaults.")
        baseline = None

    final_reports, segments = [], []

    for q in questions_config:
        q_id = q['question_id']
        raw_seg = os.path.join(output_dir, f"q{q_id}_raw.mp4")
        wav_p = os.path.join(output_dir, f"q{q_id}.wav")

        # Precise FFmpeg cutting with error handling.
        duration = q["end_time"] - q["start_time"]
        try:
            subprocess.run([
                'ffmpeg', '-y', '-ss', str(q["start_time"]), '-t', str(duration),
                '-i', video_path, '-c:v', 'libx264', '-c:a', 'aac',
                '-strict', 'experimental', raw_seg
            ], check=True, capture_output=True)
        except subprocess.CalledProcessError:
            print(f"Skipping Question {q_id}: Time range might be out of video bounds.")
            continue

        # Audio extraction: resample to 16 kHz mono WAV for the ASR model.
        try:
            y, sr = librosa.load(raw_seg, sr=16000)
            import soundfile as sf  # cached after first import; kept local
            sf.write(wav_p, y, sr)
        except Exception as e:
            print(f"Error extracting audio for Q{q_id}: {e}")
            continue

        # Audio analysis relative to the 10-second baseline.
        a_scores = compute_audio_scores(extract_audio_features(y, sr), baseline)

        # Whisper transcription. Fix: the original used a bare `except:`,
        # which also swallowed KeyboardInterrupt/SystemExit.
        try:
            transcription_data = asr(wav_p, chunk_length_s=30, return_timestamps=True)
            transcription = transcription_data["text"].strip()
        except Exception:
            transcription = "[Transcription Error]"

        similarity_score = compute_similarity_score(transcription, q["ideal_answer"])
        relevance_score = compute_relevance_score(q["question_text"], transcription)

        # Visual analysis, then mux the original audio back onto the
        # annotated (video-only) output.
        try:
            ann_v, f_c, e_c = process_video_segment(raw_seg, output_dir, q_id, a_scores)

            final_v = os.path.join(output_dir, f"q{q_id}_final.mp4")
            subprocess.run([
                'ffmpeg', '-y', '-i', ann_v, '-i', raw_seg, '-map', '0:v',
                '-map', '1:a', '-c:v', 'copy', '-c:a', 'aac', final_v
            ], check=True, capture_output=True)

            segments.append(final_v)

            final_reports.append({
                "questionId": q_id,
                "userAnswerText": transcription,
                "toneOfVoice": a_scores["tone_of_voice"],
                "clarity": a_scores["clarity"],
                "stress": a_scores["stress"],
                "confidence": round((a_scores["confidence_audio"] + f_c + e_c) / 3, 2),
                "pauses": a_scores["pauses"],
                "score": similarity_score,
                "relevance": relevance_score
            })
        except Exception as e:
            print(f"Visual analysis failed for Q{q_id}: {e}")

        # Release cached GPU memory between segments (no-op without CUDA).
        torch.cuda.empty_cache()

    # Final concatenation of all annotated segments.
    if segments:
        list_path = os.path.join(output_dir, "list.txt")
        with open(list_path, "w") as f:
            for s in segments:
                f.write(f"file '{os.path.abspath(s)}'\n")

        final_output = os.path.join(output_dir, "Intervision_Final_Result.mp4")
        # Fix: was `os.system` with an unquoted f-string shell command —
        # paths with spaces or shell metacharacters would break (or inject
        # into) the command. Use an argument list like every other FFmpeg
        # call in this function.
        subprocess.run([
            'ffmpeg', '-f', 'concat', '-safe', '0', '-i', list_path,
            '-c:v', 'libx264', '-preset', 'superfast', '-crf', '23',
            '-c:a', 'aac', '-y', final_output
        ], capture_output=True)

        with open(os.path.join(output_dir, "report.json"), "w") as f:
            json.dump({"listOfAnswerReport": final_reports}, f, indent=4)

        return f"Successfully processed {len(segments)} questions."
    else:
        return "No segments were processed. Check your video time ranges."