Update app.py
Browse files
app.py
CHANGED
|
@@ -19,9 +19,20 @@ def load_emotion_model(model_path):
|
|
| 19 |
print("Error loading emotion prediction model:", e)
|
| 20 |
return None
|
| 21 |
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
|
| 26 |
model_path = 'mymodel_SER_LSTM_RAVDESS.h5'
|
| 27 |
emotion_model = load_emotion_model(model_path)
|
|
|
|
| 19 |
print("Error loading emotion prediction model:", e)
|
| 20 |
return None
|
| 21 |
|
| 22 |
+
|
| 23 |
+
from faster_whisper import WhisperModel

# Whisper checkpoint size to load; "small" trades some accuracy for
# lower memory use and faster inference.
model_size = "small"

# Run on CPU with INT8 quantization (no GPU required).
# NOTE(review): the original comment claimed "GPU with FP16", which
# contradicted the actual arguments below — corrected to match the code.
model = WhisperModel(model_size, device="cpu", compute_type="int8")
|
| 30 |
+
|
| 31 |
+
def transcribe(audio):
    """Transcribe *audio* with the module-level Whisper model.

    Returns the concatenation of all segment texts produced by the model
    (segment texts carry their own leading whitespace).
    """
    segs, _info = model.transcribe(audio, beam_size=5)
    return "".join(seg.text for seg in segs)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
|
| 37 |
# Path to the pre-trained speech-emotion-recognition model
# (presumably an LSTM trained on RAVDESS, per the filename — confirm).
model_path = 'mymodel_SER_LSTM_RAVDESS.h5'
# May be None if loading fails (load_emotion_model returns None on error).
emotion_model = load_emotion_model(model_path)
|