Update app.py
app.py CHANGED
@@ -29,6 +29,7 @@ def process_video_audio(video_path):
 
     face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
 
+
     if len(wav[0]) > 261540:
         print(wav.shape)
         train_audio_wave[0, :] = wav[0][:261540]
@@ -69,7 +70,7 @@ def process_video_audio(video_path):
 def predict_emotion(video_path):
     last_frame, audio_path, train_visual, train_audio_wave, train_audio_cnn = process_video_audio(video_path)
 
-    model = load_model("
+    model = load_model("model_vui_ve1024.keras")
 
     predictions = model.predict({
         "input_visual": train_visual,
@@ -81,7 +82,7 @@ def predict_emotion(video_path):
     return last_frame, audio_path, predicted_label
 
 def predict_emotion_gradio(video_path):
-    emotion_dict = {0: 'neutral', 1: 'calm', 2: 'happy', 3: 'sad', 4: 'angry', 5: 'fearful'
+    emotion_dict = {0: 'neutral', 1: 'calm', 2: 'happy', 3: 'sad', 4: 'angry', 5: 'fearful'}
     last_frame, audio_path, predicted_label = predict_emotion(video_path)
     predicted_emotion = emotion_dict[predicted_label]
     return last_frame, audio_path, predicted_emotion