File size: 1,599 Bytes
29fc8cd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
import tensorflow as tf
import numpy as np
import os
import librosa
import pandas as pd

# Locations of the training data on disk
AUDIO_PATH = 'data/train/'  # directory holding the raw audio clips
CSV_PATH = 'data/train/transcriptions.csv'  # table pairing each clip with its transcript

# Read the transcription table; pull out the audio filenames and their texts.
df = pd.read_csv(CSV_PATH)
filenames, texts = df['filename'].values, df['text'].values

# Function to preprocess the audio: Convert to Mel spectrogram
def preprocess_audio(filename):
    file_path = os.path.join(AUDIO_PATH, filename)
    y, sr = librosa.load(file_path, sr=None)
    mel_spec = librosa.feature.melspectrogram(y, sr=sr, n_mels=128)
    mel_spec = librosa.power_to_db(mel_spec, ref=np.max)
    return mel_spec

# Preprocess all audio files.  Clips differ in duration, so their spectrograms
# have different frame counts: np.array over them would yield a ragged object
# array that Keras cannot consume.  Transpose each to (time, n_mels) to match
# the model's input layer, then zero-pad every clip to the longest one.
specs = [preprocess_audio(f).T for f in filenames]  # each: (n_frames, 128)
max_len = max(s.shape[0] for s in specs)
X = np.stack([
    np.pad(s, ((0, max_len - s.shape[0]), (0, 0)), mode='constant')
    for s in specs
])  # shape: (num_clips, max_len, 128)

# Encode each transcript as an integer class index so the targets are
# compatible with sparse_categorical_crossentropy (raw strings are not).
# A real TTS pipeline would tokenize the text instead; this toy setup
# treats every distinct transcript as one class.
_, y = np.unique(texts, return_inverse=True)

# A minimal sequence-classification network: a bidirectional LSTM reads the
# Mel-spectrogram frames and its final state feeds two dense layers.
model = tf.keras.Sequential([
    tf.keras.layers.InputLayer(input_shape=(None, 128)),  # (time, n_mels) Mel spectrogram
    # return_sequences=False collapses the time axis to one vector per clip.
    # With return_sequences=True the Dense layers would emit a prediction per
    # frame — shape (batch, time, classes) — which cannot be trained against
    # the one-label-per-clip targets.
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(256)),
    tf.keras.layers.Dense(256, activation='relu'),
    # One softmax unit per distinct transcript (toy classification target).
    tf.keras.layers.Dense(len(np.unique(texts)), activation='softmax')
])

# Configure training: Adam optimizer with integer-label cross-entropy loss.
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)

# Fit on the preprocessed spectrograms for a short run of 10 epochs.
model.fit(X, y, batch_size=32, epochs=10)

# Persist the trained architecture + weights in HDF5 format.
model.save('model/tts_model.h5')