| import tensorflow as tf |
| import torch |
| import numpy as np |
| import os |
|
|
| |
# Load the pretrained Keras emotion model whose Dense weights will be
# ported into the PyTorch network below.
keras_model_path = 'wav2vec_model.h5'
keras_model = tf.keras.models.load_model(keras_model_path)
|
|
| |
class EmotionClassifier(torch.nn.Module):
    """Small fully-connected classifier: flatten -> 128 -> 64 -> num_classes.

    Mirrors the Keras model's Dense stack so its weights can be copied over.
    ReLU activations and dropout (p=0.3) follow each hidden Linear layer.
    """

    def __init__(self, input_shape: int, num_classes: int) -> None:
        super().__init__()
        self.flatten = torch.nn.Flatten()
        hidden_sizes = (128, 64)
        stack = []
        fan_in = input_shape
        # Build the hidden Linear/ReLU/Dropout triplets, then the output head.
        for width in hidden_sizes:
            stack.append(torch.nn.Linear(fan_in, width))
            stack.append(torch.nn.ReLU())
            stack.append(torch.nn.Dropout(0.3))
            fan_in = width
        stack.append(torch.nn.Linear(fan_in, num_classes))
        self.layers = torch.nn.Sequential(*stack)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Flatten the input (except batch dim) and return raw class logits."""
        flat = self.flatten(x)
        return self.layers(flat)
|
|
| |
| |
# Flattened feature size: 13 coefficients x 128 frames
# (presumably MFCC-style features — TODO confirm against the Keras model).
n_coeffs = 13
n_frames = 128
input_shape = n_coeffs * n_frames
num_classes = 7  # number of emotion categories
pytorch_model = EmotionClassifier(input_shape, num_classes)
|
|
| |
| |
# Port each Keras Dense layer's weights into the matching PyTorch Linear.
#
# BUG FIX: the original indexed `pytorch_model.layers[i * 2]`, where `i` is the
# Keras layer index. The Sequential built above places its Linear modules at
# indices 0, 3, 6 (Linear/ReLU/Dropout triplets), so `i * 2` targeted ReLU or
# Dropout modules (or ran past the end) instead of the Linear layers. Pairing
# the Dense and Linear layers explicitly makes the mapping correct regardless
# of how many auxiliary layers either model contains.
dense_layers = [l for l in keras_model.layers
                if isinstance(l, tf.keras.layers.Dense)]
linear_layers = [m for m in pytorch_model.layers
                 if isinstance(m, torch.nn.Linear)]
if len(dense_layers) != len(linear_layers):
    raise ValueError(
        f'layer count mismatch: {len(dense_layers)} Keras Dense vs '
        f'{len(linear_layers)} PyTorch Linear'
    )

for keras_layer, torch_layer in zip(dense_layers, linear_layers):
    # Keras stores Dense kernels as (in, out); PyTorch Linear expects (out, in).
    keras_weights, keras_bias = keras_layer.get_weights()
    torch_layer.weight.data = torch.tensor(keras_weights.T, dtype=torch.float32)
    torch_layer.bias.data = torch.tensor(keras_bias, dtype=torch.float32)
|
|
| |
# Persist only the converted parameters (state dict), not the module object.
output_path = 'emotion_model.pt'
torch.save(pytorch_model.state_dict(), output_path)