THP2903 committed on
Commit
4303f04
·
verified ·
1 Parent(s): 1f82907

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -5
app.py CHANGED
@@ -4,9 +4,16 @@ import torchaudio
4
  import cv2
5
  import os
6
  import numpy as np
 
7
 
8
  emotion_labels = {0: 'neutral', 1: 'calm', 2: 'happy', 3: 'sad', 4: 'angry', 5: 'fearful'}
9
 
 
 
 
 
 
 
10
  def process_video_audio(video_path, audio_path):
11
 
12
  wav = pt.tensor(list(audio_path[1]))
@@ -31,7 +38,6 @@ def process_video_audio(video_path, audio_path):
31
 
32
  cap = cv2.VideoCapture(video_path)
33
  frame_idx = 0
34
- last_frame = None
35
  for i in range(100):
36
  ret, frame = cap.read()
37
  if ret and (i % 10 == 0):
@@ -45,16 +51,15 @@ def process_video_audio(video_path, audio_path):
45
  else:
46
  resized_frame = cv2.resize(frame, (120, 120))
47
  train_visual[0, :, :, :, frame_idx] = pt.tensor(resized_frame)
48
- last_frame = frame
49
  frame_idx += 1
50
  cap.release()
51
- predicted_emotion = "unknown"
52
 
53
- return last_frame
54
 
55
  # Định nghĩa giao diện Gradio
56
  def gradio_interface(video, audio):
57
- frame= process_video_audio(video, audio)
 
58
  return frame
59
 
60
  iface = gr.Interface(
 
4
  import cv2
5
  import os
6
  import numpy as np
7
+ import tensorflow as tf
8
 
9
  emotion_labels = {0: 'neutral', 1: 'calm', 2: 'happy', 3: 'sad', 4: 'angry', 5: 'fearful'}
10
 
11
+ def trained_model(model_path):
12
+
13
+ model = load_model(model_path)
14
+
15
+ return model
16
+
17
  def process_video_audio(video_path, audio_path):
18
 
19
  wav = pt.tensor(list(audio_path[1]))
 
38
 
39
  cap = cv2.VideoCapture(video_path)
40
  frame_idx = 0
 
41
  for i in range(100):
42
  ret, frame = cap.read()
43
  if ret and (i % 10 == 0):
 
51
  else:
52
  resized_frame = cv2.resize(frame, (120, 120))
53
  train_visual[0, :, :, :, frame_idx] = pt.tensor(resized_frame)
 
54
  frame_idx += 1
55
  cap.release()
 
56
 
57
+ return train_visual, train_audio_wave, train_audio_cnn
58
 
59
  # Định nghĩa giao diện Gradio
60
  def gradio_interface(video, audio):
61
+ train_visual, train_audio_wave, train_audio_cnn = process_video_audio(video, audio)
62
+ model = trained_model("./model_vui_ve.h5")
63
  return frame
64
 
65
  iface = gr.Interface(