Update pipeline.py
pipeline.py  +172 -128  CHANGED
@@ -480,32 +480,41 @@ def draw_metric_bars(panel,

##Integrated Video Processing (Analysis + Annotation)

-def process_video_segment(video_path, output_dir, segment_id, audio_scores_global):
-    …
    cap = cv2.VideoCapture(video_path)
-    fps …
-    …

-    # …
-    …
-    last_emotions = None
-    last_eye_s = 0.5
-    last_lm = None
-    # ------------------------------
-
-    b_conf = audio_scores_global.get("confidence_audio", 50)
-    b_clar = audio_scores_global.get("clarity", 50)
-    b_stress = audio_scores_global.get("stress", 20)

    with vision.FaceLandmarker.create_from_options(options) as landmarker:
        while cap.isOpened():
@@ -513,58 +522,109 @@ def process_video_segment(video_path, output_dir, segment_id, audio_scores_globa

            if not ret:
                break

-            …
-            if …
            mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
-

-            if …
-            …

-            # 2. USE LAST KNOWN DATA FOR CALCULATIONS & DRAWING
-            d_conf, d_clar, d_stress = b_conf, b_clar, b_stress
-
-            if last_results and last_results.face_landmarks:
-                # Use current local variables from 'last' successful AI run
-                curr_f_conf = (last_emotions.get("neutral", 0) + last_emotions.get("happy", 0)) * 100
-                d_conf = (b_conf * 0.7) + (curr_f_conf * 0.3)
-                d_clar = (b_clar * 0.8) + (last_eye_s * 100 * 0.2)
-                d_stress = (b_stress * 0.7) + ((last_emotions.get("sad",0)+last_emotions.get("angry",0))*30)
-
-                # Update accumulators only on stride frames to keep averages accurate
-                if frame_idx % frame_stride == 0:
-                    face_conf_accum.append(curr_f_conf)
-                    eye_accum.append(last_eye_s)
-
-                dom_emo = max(last_emotions, key=last_emotions.get)
-                v_t = sum(emotion_va[e][0]*s for e,s in last_emotions.items() if e in emotion_va)
-                a_t = sum(emotion_va[e][1]*s for e,s in last_emotions.items() if e in emotion_va)
-
-                # Keep smoothing every frame for fluid movement
-                smooth_v += 0.15 * (v_t - smooth_v)
-                smooth_a += 0.15 * (a_t - smooth_a)
-
-                # Draw face box using the last known landmarks
-                xs, ys = [l.x*width for l in last_lm], [l.y*height for l in last_lm]
-                draw_face_box(
-                    frame,
-                    int(min(xs)), int(min(ys)),
-                    int(max(xs) - min(xs)), int(max(ys) - min(ys)),
-                    dom_emo
-                )
-
-            # 3. ALWAYS DRAW UI (Wheel and Bars)
-            frame = draw_full_emotion_wheel(frame, (width-130, height-100), 90, smooth_v, smooth_a, dom_emo)
-            frame = draw_metric_bars(frame, 30, height-160, 28, 200, 6, d_conf, d_clar, d_stress)
-
            out.write(frame)
            frame_idx += 1

    cap.release()
    out.release()
-    return …

##Main pipeline
def run_intervision_pipeline(video_path, questions_config, output_dir):
@@ -573,95 +633,79 @@ def run_intervision_pipeline(video_path, questions_config, output_dir):

    os.makedirs(output_dir, exist_ok=True)

-    # Establish …
    try:
        y_b, sr_b = librosa.load(video_path, sr=16000, duration=10)
        baseline = extract_audio_features(y_b, sr_b)
    except Exception as e:
-        print(f"Baseline …
        baseline = None

-    final_reports …

    for q in questions_config:
        q_id = q['question_id']
-        raw_seg = os.path.join(output_dir, f"q{q_id}_raw.mp4")
        wav_p = os.path.join(output_dir, f"q{q_id}.wav")
-
-        # Precise FFmpeg cutting with error handling
        duration = q["end_time"] - q["start_time"]
        try:
            subprocess.run([
                'ffmpeg', '-y', '-ss', str(q["start_time"]), '-t', str(duration),
-                '-i', video_path, '-…
            ], check=True, capture_output=True)
-            …
-            y, sr = librosa.load(raw_seg, sr=16000)
-            import soundfile as sf
-            sf.write(wav_p, y, sr)
-        except Exception as e:
-            print(f"Error extracting audio for Q{q_id}: {e}")
-            continue
-
-        # Audio Analysis
-        a_scores = compute_audio_scores(extract_audio_features(y, sr), baseline)
-
-        # Whisper Transcription
-        try:
-            transcription_data = asr(wav_p, chunk_length_s=30, return_timestamps=True)
            transcription = transcription_data["text"].strip()
-            …
-            relevance_score = compute_relevance_score(q["question_text"], transcription)
-
-        # Visual Analysis
-        try:
-            ann_v, f_c, e_c = process_video_segment(raw_seg, output_dir, q_id, a_scores)
-
-            final_v = os.path.join(output_dir, f"q{q_id}_final.mp4")
-            subprocess.run([
-                'ffmpeg', '-y', '-i', ann_v, '-i', raw_seg, '-map', '0:v', '-map', '1:a',
-                '-c:v', 'copy', '-c:a', 'aac', final_v
-            ], check=True, capture_output=True)
-
-            segments.append(final_v)

            final_reports.append({
                "questionId": q_id,
                "userAnswerText": transcription,
                "toneOfVoice": a_scores["tone_of_voice"],
                "clarity": a_scores["clarity"],
                "stress": a_scores["stress"],
-                "confidence": …
                "pauses": a_scores["pauses"],
                "score": similarity_score,
                "relevance": relevance_score
            })
        except Exception as e:
-            print(f"…
-
-            torch.cuda.empty_cache()
-
-    # Final concatenation
-    if segments:
-        list_path = os.path.join(output_dir, "list.txt")
-        with open(list_path, "w") as f:
-            for s in segments:
-                f.write(f"file '{os.path.abspath(s)}'\n")
-
-        final_output = os.path.join(output_dir, "Intervision_Final_Result.mp4")
-        os.system(f"ffmpeg -f concat -safe 0 -i {list_path} -c:v libx264 -preset superfast -crf 23 -c:a aac -y {final_output}")

    with open(os.path.join(output_dir, "report.json"), "w") as f:
        json.dump({"listOfAnswerReport": final_reports}, f, indent=4)

-    return f"…
-
-    …

##Integrated Video Processing (Analysis + Annotation)

+def process_full_video(video_path, output_dir, questions_config, audio_results_map):
+    """
+    Enhanced video processing with:
+    1. Real-time (Live) Audio Metric Bars using a sliding window.
+    2. Dynamic Emotion Wheel and Face Tracking.
+    3. Auto-wrapping Question Text that avoids UI overlap.
+    """
    cap = cv2.VideoCapture(video_path)
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+    # Load full audio for live segment analysis
+    full_audio, sr = librosa.load(video_path, sr=16000)
+
+    temp_output = os.path.join(output_dir, "annotated_full_raw.mp4")
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+    out = cv2.VideoWriter(temp_output, fourcc, fps, (width, height))
+
+    # Mediapipe Setup
+    base_options = python.BaseOptions(model_asset_path="face_landmarker.task")
+    options = vision.FaceLandmarkerOptions(
+        base_options=base_options,
+        running_mode=vision.RunningMode.VIDEO,
+        num_faces=1
+    )
+
+    frame_idx = 0
+    smooth_v, smooth_a = 0.0, 0.0
+    dom_emo = "neutral"
+    last_landmarks = None

+    # Live Scores Buffering
+    live_scores = {"confidence_audio": 0.0, "clarity": 0.0, "stress": 0.0}
+    smoothing_factor = 0.15  # Controls how "bouncy" the bars are

    with vision.FaceLandmarker.create_from_options(options) as landmarker:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

+            current_time = frame_idx / fps
+            active_answer = next((q for q in questions_config if q["start_time"] <= current_time <= q["end_time"]), None)
+            next_q = next((q for q in questions_config if current_time < q["start_time"]), None)
+            next_text = f"Q) {next_q['question_text']}" if next_q else "Preparing..."
+
+            # --- 1. LIVE AUDIO ANALYSIS (Every 10 frames) ---
+            if frame_idx % 10 == 0:
+                # Analyze the last 3 seconds of audio for "Live" feel
+                start_sample = max(0, int((current_time - 3) * sr))
+                end_sample = int(current_time * sr)
+                audio_segment = full_audio[start_sample:end_sample]
+
+                if len(audio_segment) > sr * 0.5:  # At least 0.5s of audio to analyze
+                    feats = extract_audio_features(audio_segment, sr)
+                    # Use global baseline if available
+                    instant_scores = compute_audio_scores(feats, baseline=None)
+
+                    # Apply smoothing to prevent jittery bars
+                    live_scores["confidence_audio"] += smoothing_factor * (instant_scores["confidence_audio"] - live_scores["confidence_audio"])
+                    live_scores["clarity"] += smoothing_factor * (instant_scores["clarity"] - live_scores["clarity"])
+                    live_scores["stress"] += smoothing_factor * (instant_scores["stress"] - live_scores["stress"])
+
+            # --- 2. VISUAL AI (Face & Emotion) ---
+            if frame_idx % 4 == 0:
                mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+                results = landmarker.detect_for_video(mp_image, int(current_time * 1000))

+                if results.face_landmarks:
+                    last_landmarks = results.face_landmarks[0]
+                    emo_probs = analyze_face_emotion(frame)
+                    dom_emo = max(emo_probs, key=emo_probs.get)
+                    v_target, a_target = compute_valence_arousal_from_probs(emo_probs)
+                    smooth_v += 0.15 * (v_target - smooth_v)
+                    smooth_a += 0.15 * (a_target - smooth_a)
+
+            # --- 3. RENDERING UI ELEMENTS ---
+            # Face Box
+            if last_landmarks:
+                xs = [lm.x * width for lm in last_landmarks]
+                ys = [lm.y * height for lm in last_landmarks]
+                draw_face_box(frame, int(min(xs)), int(min(ys)), int(max(xs)-min(xs)), int(max(ys)-min(ys)), dom_emo)
+
+            # Emotion Wheel
+            draw_full_emotion_wheel(frame, (width - 130, height - 100), 90, smooth_v, smooth_a, dom_emo)
+
+            # Live Metric Bars
+            draw_metric_bars(
+                frame, 30, height - 160, 28, 200, 6,
+                live_scores["confidence_audio"], live_scores["clarity"], live_scores["stress"]
+            )
+
+            # Question Overlay (Wrapped Text)
+            if not active_answer:
+                frame = draw_question_overlay(frame, next_text, width, height)

            out.write(frame)
            frame_idx += 1

    cap.release()
    out.release()
+    return temp_output
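For reference, the live-bar behaviour described in the docstring above boils down to a sliding audio window plus an exponential moving average (the same alpha of 0.15 appears as smoothing_factor). A minimal standalone sketch, not part of pipeline.py; the score_fn callback, window length, and frame interval are illustrative assumptions:

# Sketch only: sliding-window scoring with the same EMA update used for live_scores above.
# `score_fn` is a hypothetical stand-in for extract_audio_features + compute_audio_scores.
import numpy as np

def smoothed_window_scores(audio, sr, fps, score_fn, window_s=3.0, every_n_frames=10, alpha=0.15):
    """Yield (time, smoothed_score) pairs for a mono audio array."""
    smoothed = 0.0
    total_frames = int(len(audio) / sr * fps)
    for frame_idx in range(0, total_frames, every_n_frames):
        t = frame_idx / fps
        window = audio[max(0, int((t - window_s) * sr)):int(t * sr)]
        if len(window) > sr * 0.5:                      # need at least 0.5 s of audio
            instant = score_fn(window, sr)              # raw score for this window
            smoothed += alpha * (instant - smoothed)    # EMA keeps the bars from jittering
        yield t, smoothed

# Example with a toy RMS-based score in the 0-100 range:
audio = np.random.randn(16000 * 10).astype(np.float32) * 0.1
toy_score = lambda y, sr: min(100.0, float(np.sqrt(np.mean(y ** 2))) * 1000)
trace = list(smoothed_window_scores(audio, 16000, 30.0, toy_score))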
+
+def draw_question_overlay(frame, text, width, height):
+    """Draws a wrapped text box above the Wheel and Bars."""
+    font = cv2.FONT_HERSHEY_DUPLEX
+    font_scale = 0.65
+    thickness = 1
+    side_margin = 50
+    bottom_limit = height - 270  # Ensure it stays above the bars/wheel
+    line_height = 35
+
+    # Text Wrapping Logic
+    max_w = width - (2 * side_margin)
+    words = text.split(' ')
+    lines, current_line = [], ""
+    for word in words:
+        test = current_line + word + " "
+        (w, _), _ = cv2.getTextSize(test, font, font_scale, thickness)
+        if w < max_w:
+            current_line = test
+        else:
+            lines.append(current_line)
+            current_line = word + " "
+    lines.append(current_line)
+
+    # Box dimensions
+    rect_h = (len(lines) * line_height) + 20
+    y2 = bottom_limit
+    y1 = y2 - rect_h
+
+    # Transparent Background
+    overlay = frame.copy()
+    cv2.rectangle(overlay, (side_margin - 10, y1), (width - side_margin + 10, y2), (20, 20, 20), -1)
+    cv2.addWeighted(overlay, 0.7, frame, 0.3, 0, frame)
+
+    # Draw Lines
+    for i, line in enumerate(lines):
+        (tw, th), _ = cv2.getTextSize(line.strip(), font, font_scale, thickness)
+        tx = (width - tw) // 2
+        ty = y1 + 25 + (i * line_height)
+        cv2.putText(frame, line.strip(), (tx, ty), font, font_scale, (255, 255, 255), thickness, cv2.LINE_AA)
+
+    return frame
+

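A quick way to eyeball the wrapping behaviour of draw_question_overlay is to run it on a blank frame. A small usage sketch, not part of the commit; it assumes cv2 and numpy are importable, and the frame size and question text are arbitrary:

# Usage sketch only: render the overlay on an empty 1280x720 frame and save a preview.
import numpy as np
preview = np.zeros((720, 1280, 3), dtype=np.uint8)
preview = draw_question_overlay(preview, "Q) Tell me about a time you had to resolve a conflict in your team.", 1280, 720)
cv2.imwrite("overlay_preview.png", preview)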
##Main pipeline
def run_intervision_pipeline(video_path, questions_config, output_dir):

    os.makedirs(output_dir, exist_ok=True)

+    # 1. Establish Audio Baseline (First 10s)
    try:
        y_b, sr_b = librosa.load(video_path, sr=16000, duration=10)
        baseline = extract_audio_features(y_b, sr_b)
    except Exception as e:
+        print(f"Baseline Warning: {e}")
        baseline = None

+    final_reports = []
+    audio_results_map = {}  # To pass scores to the visual function

+    # 2. Pre-process each question (Audio & Text Analysis only)
    for q in questions_config:
        q_id = q['question_id']
        wav_p = os.path.join(output_dir, f"q{q_id}.wav")
        duration = q["end_time"] - q["start_time"]
+
+        # Extract audio segment for analysis
        try:
            subprocess.run([
                'ffmpeg', '-y', '-ss', str(q["start_time"]), '-t', str(duration),
+                '-i', video_path, '-vn', '-acodec', 'pcm_s16le', '-ar', '16000', wav_p
            ], check=True, capture_output=True)
+
+            y, sr = librosa.load(wav_p, sr=16000)
+            a_scores = compute_audio_scores(extract_audio_features(y, sr), baseline)
+
+            # Whisper Transcription
+            transcription_data = asr(wav_p, chunk_length_s=30)
            transcription = transcription_data["text"].strip()
+
+            # Scores
+            similarity_score = compute_similarity_score(transcription, q["ideal_answer"])
+            relevance_score = compute_relevance_score(q["question_text"], transcription)

+            # Store results for the visual processing step
+            audio_results_map[q_id] = a_scores
+
+            # Prepare the final JSON report entry
            final_reports.append({
                "questionId": q_id,
                "userAnswerText": transcription,
                "toneOfVoice": a_scores["tone_of_voice"],
                "clarity": a_scores["clarity"],
                "stress": a_scores["stress"],
+                "confidence": a_scores["confidence_audio"],  # We'll refine this after visual if needed
                "pauses": a_scores["pauses"],
                "score": similarity_score,
                "relevance": relevance_score
            })
+
        except Exception as e:
+            print(f"Error analyzing Question {q_id}: {e}")
+            continue

+    # 3. Process the entire video visually (Annotations + Overlays)
+    try:
+        # Run the full-video annotation pass (process_full_video, defined above)
+        annotated_video_raw = process_full_video(video_path, output_dir, questions_config, audio_results_map)
+
+        # 4. Final Merge: Put original audio back onto the annotated video
+        final_output = os.path.join(output_dir, "Intervision_Final_Report.mp4")
+        subprocess.run([
+            'ffmpeg', '-y', '-i', annotated_video_raw, '-i', video_path,
+            '-map', '0:v:0', '-map', '1:a:0', '-c:v', 'libx264', '-preset', 'veryfast',
+            '-crf', '22', '-c:a', 'aac', '-b:a', '192k', '-shortest', final_output
+        ], check=True)
+
+        # Save the JSON report
        with open(os.path.join(output_dir, "report.json"), "w") as f:
            json.dump({"listOfAnswerReport": final_reports}, f, indent=4)

+        return f"Success! Full video generated at {final_output}"
+
+    except Exception as e:
+        return f"Visual processing or merging failed: {e}"
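Finally, a hedged example of how the updated entry point might be called. The file paths and question entries below are placeholders; the dictionary keys are taken from how questions_config is read in the code above (question_id, question_text, ideal_answer, start_time, end_time):

# Illustrative invocation only; paths and questions are placeholders.
questions_config = [
    {
        "question_id": 1,
        "question_text": "Tell me about yourself.",
        "ideal_answer": "A concise summary of relevant experience and skills.",
        "start_time": 5.0,
        "end_time": 65.0,
    },
    {
        "question_id": 2,
        "question_text": "Describe a challenging project you led.",
        "ideal_answer": "A specific project, the obstacles faced, and the measurable outcome.",
        "start_time": 70.0,
        "end_time": 140.0,
    },
]

result = run_intervision_pipeline("interview_recording.mp4", questions_config, "output")
print(result)  # "Success! Full video generated at ..." or an error message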