Upload folder using huggingface_hub

- best_emotion_model.keras +2 -2
- emotion_recognition_model.keras +2 -2
- emotion_recognition_wrapper_model.keras +2 -2
- example_usage.py +12 -12
- training_history.png +0 -0
best_emotion_model.keras CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:045c6926ad8a2db4300f046b277bc3d45f5d1f9569638fd242503c40fcacdeab
+size 11589719
emotion_recognition_model.keras CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:17d42eb5c16319510b808d7a2219d19d3adc62b6259597ab811564a02f13f08a
+size 11589719
emotion_recognition_wrapper_model.keras CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:81885670209be184409345406394306fe3f3743a1f5dda0bf52d92e1a7ecd7b9
+size 11574306
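All three .keras files are stored via Git LFS, so the repository tracks small pointer files (spec version, sha256 oid, byte size) while the weights themselves live in LFS storage. As a minimal sketch of how a consumer might fetch and load one of these models with huggingface_hub (the repo_id below is a placeholder, not taken from this commit):

from huggingface_hub import hf_hub_download
import tensorflow as tf

# "your-username/emotion-recognition" is a hypothetical repo_id.
model_path = hf_hub_download(
    repo_id="your-username/emotion-recognition",
    filename="emotion_recognition_model.keras",
)
model = tf.keras.models.load_model(model_path)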
example_usage.py CHANGED

@@ -9,18 +9,18 @@ def load_model(model_path):
     "Load the emotion recognition model."
     return tf.keras.models.load_model(model_path)
 
-def predict_emotion(model, audio_path, preprocessing_config):
+def predict_emotion(model, audio_path, preprocessor_config):
     "Predict emotion from an audio file."
     # Load audio file
     waveform, sr = librosa.load(
         audio_path,
-        sr=preprocessing_config["sample_rate"],
-        duration=preprocessing_config["duration"],
-        offset=preprocessing_config["offset"]
+        sr=preprocessor_config["sample_rate"],
+        duration=preprocessor_config["duration"],
+        offset=preprocessor_config["offset"]
     )
 
     # Ensure consistent length
-    target_length = int(preprocessing_config["sample_rate"] * preprocessing_config["duration"])
+    target_length = int(preprocessor_config["sample_rate"] * preprocessor_config["duration"])
     if len(waveform) < target_length:
         waveform = np.pad(waveform, (0, target_length - len(waveform)))
     if len(waveform) > target_length:
@@ -29,9 +29,9 @@ def predict_emotion(model, audio_path, preprocessing_config):
     # Extract features
     mel_spec = librosa.feature.melspectrogram(
         y=waveform,
-        sr=preprocessing_config["sample_rate"],
-        n_fft=preprocessing_config["frame_length"],
-        hop_length=preprocessing_config["hop_length"],
+        sr=preprocessor_config["sample_rate"],
+        n_fft=preprocessor_config["frame_length"],
+        hop_length=preprocessor_config["hop_length"],
         n_mels=128
     )
 
@@ -46,7 +46,7 @@ def predict_emotion(model, audio_path, preprocessing_config):
 
     # Get emotion label
     emotion_idx = np.argmax(prediction)
-    emotion = preprocessing_config["emotions"][str(emotion_idx)]
+    emotion = preprocessor_config["emotions"][str(emotion_idx)]
 
     return emotion, prediction[emotion_idx]
 
@@ -55,14 +55,14 @@ if __name__ == "__main__":
     # Load model
     model = load_model("emotion_recognition_model.keras")
 
-    # Load preprocessing_config
+    # Load preprocessor_config
     with open("preprocessing.json", "r") as f:
-        preprocessing_config = json.load(f)
+        preprocessor_config = json.load(f)
 
     # Path to your audio file
     audio_path = "path/to/your/audio.wav"
 
     # Predict emotion
-    emotion, confidence = predict_emotion(model, audio_path, preprocessing_config)
+    emotion, confidence = predict_emotion(model, audio_path, preprocessor_config)
 
     print(f"Predicted emotion: {emotion} with confidence {confidence:.2f}")
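The script reads its settings from preprocessing.json; the keys it accesses above (sample_rate, duration, offset, frame_length, hop_length, emotions) imply a structure along the following lines. This is a sketch with illustrative placeholder values only; the actual numbers and emotion labels in the repository's preprocessing.json may differ:

import json

# Placeholder values; only the key names are taken from example_usage.py.
preprocessor_config = {
    "sample_rate": 22050,    # Hz, passed to librosa.load and melspectrogram
    "duration": 3.0,         # seconds of audio to load
    "offset": 0.5,           # seconds to skip at the start of the file
    "frame_length": 2048,    # used as n_fft
    "hop_length": 512,
    # Keys are strings because the script indexes with str(emotion_idx),
    # matching how JSON serializes integer keys. Labels are hypothetical.
    "emotions": {"0": "angry", "1": "happy", "2": "neutral", "3": "sad"},
}

with open("preprocessing.json", "w") as f:
    json.dump(preprocessor_config, f, indent=2)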
training_history.png CHANGED

(binary image; no text diff)
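For reference, the commit title says the folder was pushed with huggingface_hub. A minimal sketch of such an upload, assuming a hypothetical repo_id:

from huggingface_hub import HfApi

api = HfApi()
# repo_id is a placeholder. upload_folder pushes every file in folder_path;
# large binaries such as the .keras weights end up tracked via Git LFS,
# which is why the diffs above show LFS pointer files.
api.upload_folder(
    folder_path=".",
    repo_id="your-username/emotion-recognition",
    commit_message="Upload folder using huggingface_hub",
)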