TelmaG committed
Commit a31451c · Parent: 581a34f

Add model with Gradio for Inference

Files changed (14)
  1. .gitattributes +1 -0
  2. app.py +35 -0
  3. dataset_preprocess.py +231 -0
  4. features.py +150 -0
  5. inference.py +515 -0
  6. model/model.h5 +3 -0
  7. model/model.npy +3 -0
  8. params.py +36 -0
  9. requirements.txt +6 -0
  10. train.py +514 -0
  11. yamnet.py +164 -0
  12. yamnet/yamnet.h5 +3 -0
  13. yamnet/yamnet_class_map.csv +526 -0
  14. yamnet_test.py +56 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,35 @@
+ import gradio as gr
+ import tempfile
+ from inference import AudioClassifier, Config
+
+ # Initialize config
+ config = Config(
+     yamnet_model_path="yamnet/yamnet.h5",
+     yamnet_classes_path="yamnet/yamnet_class_map.csv",
+     model_path="model/model.h5",
+     custom_classes_path="model/model.npy",
+     output_dir="results",
+     output_file="classification.txt"
+ )
+
+ classifier = AudioClassifier(config)
+
+ def classify_audio(audio_file):
+     with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp:
+         temp.write(audio_file.read())
+         path = temp.name
+
+     result = classifier.classify_file(path)
+     label = result['dominant_label']
+     percent = result['dominant_score_percentage']
+     breakdown = "\n".join(f"{k}: {round(v*100)}%" for k, v in sorted(
+         result["aggregated_predictions"].items(), key=lambda x: x[1], reverse=True))
+     return f"🔊 **Top Prediction**: {label} ({percent}%)\n\n📊 **Top Classes**:\n{breakdown}"
+
+ gr.Interface(
+     fn=classify_audio,
+     inputs=gr.Audio(type="file"),
+     outputs="markdown",
+     title="YAMNet + Custom Model Audio Classifier",
+     description="Upload a WAV file."
+ ).launch()
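The Gradio wiring above follows the Gradio 3.x API (with gr.Audio(type="file") the handler receives a file object exposing .read(); newer Gradio releases expect type="filepath" or "numpy" here and pass a path or array instead). The same pipeline can be exercised without the UI. A minimal sketch, assuming the model files from this commit are in place and that "example.wav" is a placeholder for any local WAV file (inference.py resamples and downmixes as needed):

    from inference import AudioClassifier, Config

    config = Config(
        yamnet_model_path="yamnet/yamnet.h5",
        yamnet_classes_path="yamnet/yamnet_class_map.csv",
        model_path="model/model.h5",
        custom_classes_path="model/model.npy",
    )
    classifier = AudioClassifier(config)

    result = classifier.classify_file("example.wav")  # hypothetical input file
    print(result["dominant_label"], result["dominant_score_percentage"])
    for label, score in result["aggregated_predictions"].items():
        print(f"{label}: {round(score * 100)}%")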
dataset_preprocess.py ADDED
@@ -0,0 +1,231 @@
1
+ """
2
+ Audio Converter
3
+
4
+ This script scans a directory for audio files and converts them to WAV format.
5
+ It only processes audio files and skips all other file types.
6
+
7
+ Usage:
8
+ python dataset_preprocess.py --input_dir /path/to/audio/files --output_dir /path/to/output
9
+ """
10
+
11
+ import os
12
+ import sys
13
+ import argparse
14
+ from pathlib import Path
15
+ from typing import List, Tuple
16
+ import subprocess
17
+
18
+
19
+ def print_info(message):
20
+ print(f"INFO: {message}")
21
+
22
+ def print_error(message):
23
+ print(f"ERROR: {message}")
24
+
25
+ def print_debug(message):
26
+ if VERBOSE:
27
+ print(f"DEBUG: {message}")
28
+
29
+ VERBOSE = False
30
+
31
+ # Audio formats that can be converted
32
+ AUDIO_FORMATS = {
33
+ '.mp3', '.m4a', '.aac', '.flac', '.ogg', '.wma', '.aiff', '.ape', '.opus'
34
+ }
35
+
36
+
37
+ def check_dependencies() -> bool:
38
+ """
39
+ Check if required dependencies are installed.
40
+
41
+ Returns:
42
+ bool: True if dependencies are met, False otherwise
43
+ """
44
+ # Check for ffmpeg
45
+ try:
46
+ subprocess.run(
47
+ ["ffmpeg", "-version"],
48
+ stdout=subprocess.PIPE,
49
+ stderr=subprocess.PIPE
50
+ )
51
+ print_info("ffmpeg is installed.")
52
+ return True
53
+ except FileNotFoundError:
54
+ print_error("ffmpeg is not installed. Please install it before running this script.")
55
+ return False
56
+
57
+
58
+ def scan_directory(directory: str) -> Tuple[List[Path], List[Path]]:
59
+ """
60
+ Scan directory for audio files.
61
+
62
+ Args:
63
+ directory: Path to the directory to scan
64
+
65
+ Returns:
66
+ Tuple containing lists of audio files and files to skip
67
+ """
68
+ audio_files = []
69
+ skip_files = []
70
+
71
+ dir_path = Path(directory)
72
+ if not dir_path.exists():
73
+ raise FileNotFoundError(f"Directory not found: {directory}")
74
+
75
+ for file_path in dir_path.glob('**/*'):
76
+ if file_path.is_file():
77
+ file_ext = file_path.suffix.lower()
78
+
79
+ if file_ext in AUDIO_FORMATS:
80
+ audio_files.append(file_path)
81
+ elif file_ext == '.wav':
82
+ # Skip existing WAV files
83
+ print_debug(f"Skipping existing WAV file: {file_path}")
84
+ skip_files.append(file_path)
85
+ else:
86
+ # Skip non-audio files
87
+ print_debug(f"Skipping non-audio file: {file_path}")
88
+ skip_files.append(file_path)
89
+
90
+ print_info(f"Found {len(audio_files)} audio files to convert")
91
+ print_info(f"Skipping {len(skip_files)} files (WAV or non-audio)")
92
+
93
+ return audio_files, skip_files
94
+
95
+
96
+ def convert_audio_to_wav(input_file: Path, output_file: Path) -> bool:
97
+ """
98
+ Convert audio file to WAV format using ffmpeg.
99
+
100
+ Args:
101
+ input_file: Path to input audio file
102
+ output_file: Path to output WAV file
103
+
104
+ Returns:
105
+ bool: True if conversion was successful, False otherwise
106
+ """
107
+ try:
108
+ # Ensure output directory exists
109
+ output_file.parent.mkdir(parents=True, exist_ok=True)
110
+
111
+ cmd = [
112
+ "ffmpeg",
113
+ "-y", # Overwrite output file if it exists
114
+ "-i", str(input_file), # Input file
115
+ "-acodec", "pcm_s16le", # Output codec (16-bit PCM)
116
+ "-ar", "44100", # Sample rate (44.1kHz)
117
+ "-ac", "1", # Mono audio (1 channel)
118
+ str(output_file) # Output file
119
+ ]
120
+
121
+ process = subprocess.run(
122
+ cmd,
123
+ stdout=subprocess.PIPE,
124
+ stderr=subprocess.PIPE
125
+ )
126
+
127
+ if process.returncode != 0:
128
+ print_error(f"Error converting {input_file}: {process.stderr.decode()}")
129
+ return False
130
+
131
+ print_info(f"Successfully converted {input_file} to WAV")
132
+ return True
133
+
134
+ except Exception as e:
135
+ print_error(f"Error converting {input_file}: {str(e)}")
136
+ return False
137
+
138
+
139
+ def process_files(audio_files: List[Path], input_dir: str, output_dir: str,
140
+ preserve_structure: bool = True) -> Tuple[int, int]:
141
+ """
142
+ Process all identified audio files for conversion.
143
+
144
+ Args:
145
+ audio_files: List of audio files to convert
146
+ input_dir: Input directory path
147
+ output_dir: Output directory path
148
+
149
+ Returns:
150
+ Tuple of successful conversions, failed conversions
151
+ """
152
+ input_base = Path(input_dir)
153
+ output_base = Path(output_dir)
154
+
155
+ success_count = 0
156
+ failure_count = 0
157
+
158
+ # Process audio files
159
+ for audio_file in audio_files:
160
+ if preserve_structure:
161
+ rel_path = audio_file.relative_to(input_base)
162
+ output_file = output_base / rel_path.with_suffix('.wav')
163
+ else:
164
+
165
+ output_file = output_base / f"{audio_file.stem}.wav"
166
+
167
+ if convert_audio_to_wav(audio_file, output_file):
168
+ success_count += 1
169
+ else:
170
+ failure_count += 1
171
+
172
+ return success_count, failure_count
173
+
174
+
175
+ def parse_arguments() -> argparse.Namespace:
176
+ """Parse command-line arguments."""
177
+ parser = argparse.ArgumentParser(description="Convert audio files to WAV format")
178
+ parser.add_argument('--input_dir', type=str, required=True,
179
+ help='Directory containing files to convert')
180
+ parser.add_argument('--output_dir', type=str, required=True,
181
+ help='Directory for output WAV files')
182
+ parser.add_argument('--flat', action='store_true',
183
+ help='Don\'t preserve directory structure')
184
+ parser.add_argument('--verbose', '-v', action='store_true',
185
+ help='Enable verbose output')
186
+
187
+ return parser.parse_args()
188
+
189
+
190
+ def main():
191
+ """Main function to run the script."""
192
+ global VERBOSE
193
+
194
+ try:
195
+ args = parse_arguments()
196
+
197
+ VERBOSE = args.verbose
198
+
199
+ print_info(f"Input directory: {args.input_dir}")
200
+ print_info(f"Output directory: {args.output_dir}")
201
+
202
+ if not check_dependencies():
203
+ print_error("Missing dependencies. Please install required packages.")
204
+ sys.exit(1)
205
+
206
+ # Create output directory if it doesn't exist
207
+ os.makedirs(args.output_dir, exist_ok=True)
208
+
209
+ audio_files, skip_files = scan_directory(args.input_dir)
210
+
211
+ # Process files
212
+ preserve_structure = not args.flat
213
+ success_count, failure_count = process_files(
214
+ audio_files,
215
+ args.input_dir,
216
+ args.output_dir,
217
+ preserve_structure
218
+ )
219
+
220
+ # Print summary
221
+ print_info(f"Conversion complete!")
222
+ print_info(f"Successfully converted: {success_count} files")
223
+ print_info(f"Failed conversions: {failure_count} files")
224
+
225
+ except Exception as e:
226
+ print_error(f"Error during execution: {str(e)}")
227
+ sys.exit(1)
228
+
229
+
230
+ if __name__ == "__main__":
231
+ main()
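Per file, the conversion above boils down to a single ffmpeg invocation (16-bit PCM, 44.1 kHz, mono), and the module can also be driven from Python rather than the CLI. A sketch, assuming the script is importable as dataset_preprocess and that "raw_audio/" and "wav_audio/" are placeholder directories:

    import dataset_preprocess as dp

    # Equivalent shell command built internally by convert_audio_to_wav():
    #   ffmpeg -y -i input.mp3 -acodec pcm_s16le -ar 44100 -ac 1 output.wav

    audio_files, skipped = dp.scan_directory("raw_audio/")
    ok, failed = dp.process_files(audio_files, "raw_audio/", "wav_audio/", preserve_structure=True)
    print(f"converted={ok} failed={failed} skipped={len(skipped)}")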
features.py ADDED
@@ -0,0 +1,150 @@
1
+ """Feature computation for YAMNet."""
2
+
3
+ import numpy as np
4
+ import tensorflow as tf
5
+
6
+
7
+ def waveform_to_log_mel_spectrogram_patches(waveform, params):
8
+ """Compute log mel spectrogram patches of a 1-D waveform."""
9
+ with tf.name_scope('log_mel_features'):
10
+ # waveform has shape [<# samples>]
11
+
12
+ # Convert waveform into spectrogram using a Short-Time Fourier Transform.
13
+ # Note that tf.signal.stft() uses a periodic Hann window by default.
14
+ window_length_samples = int(
15
+ round(params.sample_rate * params.stft_window_seconds))
16
+ hop_length_samples = int(
17
+ round(params.sample_rate * params.stft_hop_seconds))
18
+ fft_length = 2 ** int(np.ceil(np.log(window_length_samples) / np.log(2.0)))
19
+ num_spectrogram_bins = fft_length // 2 + 1
20
+ if params.tflite_compatible:
21
+ magnitude_spectrogram = _tflite_stft_magnitude(
22
+ signal=waveform,
23
+ frame_length=window_length_samples,
24
+ frame_step=hop_length_samples,
25
+ fft_length=fft_length)
26
+ else:
27
+ magnitude_spectrogram = tf.abs(tf.signal.stft(
28
+ signals=waveform,
29
+ frame_length=window_length_samples,
30
+ frame_step=hop_length_samples,
31
+ fft_length=fft_length))
32
+ # magnitude_spectrogram has shape [<# STFT frames>, num_spectrogram_bins]
33
+
34
+ # Convert spectrogram into log mel spectrogram.
35
+ linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
36
+ num_mel_bins=params.mel_bands,
37
+ num_spectrogram_bins=num_spectrogram_bins,
38
+ sample_rate=params.sample_rate,
39
+ lower_edge_hertz=params.mel_min_hz,
40
+ upper_edge_hertz=params.mel_max_hz)
41
+ mel_spectrogram = tf.matmul(
42
+ magnitude_spectrogram, linear_to_mel_weight_matrix)
43
+ log_mel_spectrogram = tf.math.log(mel_spectrogram + params.log_offset)
44
+ # log_mel_spectrogram has shape [<# STFT frames>, params.mel_bands]
45
+
46
+ # Frame spectrogram (shape [<# STFT frames>, params.mel_bands]) into patches
47
+ # (the input examples). Only complete frames are emitted, so if there is
48
+ # less than params.patch_window_seconds of waveform then nothing is emitted
49
+ # (to avoid this, zero-pad before processing).
50
+ spectrogram_hop_length_samples = int(
51
+ round(params.sample_rate * params.stft_hop_seconds))
52
+ spectrogram_sample_rate = params.sample_rate / spectrogram_hop_length_samples
53
+ patch_window_length_samples = int(
54
+ round(spectrogram_sample_rate * params.patch_window_seconds))
55
+ patch_hop_length_samples = int(
56
+ round(spectrogram_sample_rate * params.patch_hop_seconds))
57
+ features = tf.signal.frame(
58
+ signal=log_mel_spectrogram,
59
+ frame_length=patch_window_length_samples,
60
+ frame_step=patch_hop_length_samples,
61
+ axis=0)
62
+ # features has shape [<# patches>, <# STFT frames in a patch>, params.mel_bands]
63
+
64
+ return log_mel_spectrogram, features
65
+
66
+
67
+ def pad_waveform(waveform, params):
68
+ """Pads waveform with silence if needed to get an integral number of patches."""
69
+ # In order to produce one patch of log mel spectrogram input to YAMNet, we
70
+ # need at least one patch window length of waveform plus enough extra samples
71
+ # to complete the final STFT analysis window.
72
+ min_waveform_seconds = (
73
+ params.patch_window_seconds +
74
+ params.stft_window_seconds - params.stft_hop_seconds)
75
+ min_num_samples = tf.cast(min_waveform_seconds * params.sample_rate, tf.int32)
76
+ num_samples = tf.shape(waveform)[0]
77
+ num_padding_samples = tf.maximum(0, min_num_samples - num_samples)
78
+
79
+ # In addition, there might be enough waveform for one or more additional
80
+ # patches formed by hopping forward. If there are more samples than one patch,
81
+ # round up to an integral number of hops.
82
+ num_samples = tf.maximum(num_samples, min_num_samples)
83
+ num_samples_after_first_patch = num_samples - min_num_samples
84
+ hop_samples = tf.cast(params.patch_hop_seconds * params.sample_rate, tf.int32)
85
+ num_hops_after_first_patch = tf.cast(tf.math.ceil(
86
+ tf.cast(num_samples_after_first_patch, tf.float32) /
87
+ tf.cast(hop_samples, tf.float32)), tf.int32)
88
+ num_padding_samples += (
89
+ hop_samples * num_hops_after_first_patch - num_samples_after_first_patch)
90
+
91
+ padded_waveform = tf.pad(waveform, [[0, num_padding_samples]],
92
+ mode='CONSTANT', constant_values=0.0)
93
+ return padded_waveform
94
+
95
+
96
+ def _tflite_stft_magnitude(signal, frame_length, frame_step, fft_length):
97
+ """TF-Lite-compatible version of tf.abs(tf.signal.stft())."""
98
+ def _hann_window():
99
+ return tf.reshape(
100
+ tf.constant(
101
+ (0.5 - 0.5 * np.cos(2 * np.pi * np.arange(0, 1.0, 1.0 / frame_length))
102
+ ).astype(np.float32),
103
+ name='hann_window'), [1, frame_length])
104
+
105
+ def _dft_matrix(dft_length):
106
+ """Calculate the full DFT matrix in NumPy."""
107
+ # See https://en.wikipedia.org/wiki/DFT_matrix
108
+ omega = (0 + 1j) * 2.0 * np.pi / float(dft_length)
109
+ # Don't include 1/sqrt(N) scaling, tf.signal.rfft doesn't apply it.
110
+ return np.exp(omega * np.outer(np.arange(dft_length), np.arange(dft_length)))
111
+
112
+ def _rdft(framed_signal, fft_length):
113
+ """Implement real-input Discrete Fourier Transform by matmul."""
114
+ # We are right-multiplying by the DFT matrix, and we are keeping only the
115
+ # first half ("positive frequencies"). So discard the second half of rows,
116
+ # but transpose the array for right-multiplication. The DFT matrix is
117
+ # symmetric, so we could have done it more directly, but this reflects our
118
+ # intention better.
119
+ complex_dft_matrix_kept_values = _dft_matrix(fft_length)[:(
120
+ fft_length // 2 + 1), :].transpose()
121
+ real_dft_matrix = tf.constant(
122
+ np.real(complex_dft_matrix_kept_values).astype(np.float32),
123
+ name='real_dft_matrix')
124
+ imag_dft_matrix = tf.constant(
125
+ np.imag(complex_dft_matrix_kept_values).astype(np.float32),
126
+ name='imaginary_dft_matrix')
127
+ signal_frame_length = tf.shape(framed_signal)[-1]
128
+ half_pad = (fft_length - signal_frame_length) // 2
129
+ padded_frames = tf.pad(
130
+ framed_signal,
131
+ [
132
+ # Don't add any padding in the frame dimension.
133
+ [0, 0],
134
+ # Pad before and after the signal within each frame.
135
+ [half_pad, fft_length - signal_frame_length - half_pad]
136
+ ],
137
+ mode='CONSTANT',
138
+ constant_values=0.0)
139
+ real_stft = tf.matmul(padded_frames, real_dft_matrix)
140
+ imag_stft = tf.matmul(padded_frames, imag_dft_matrix)
141
+ return real_stft, imag_stft
142
+
143
+ def _complex_abs(real, imag):
144
+ return tf.sqrt(tf.add(real * real, imag * imag))
145
+
146
+ framed_signal = tf.signal.frame(signal, frame_length, frame_step)
147
+ windowed_signal = framed_signal * _hann_window()
148
+ real_stft, imag_stft = _rdft(windowed_signal, fft_length)
149
+ stft_magnitude = _complex_abs(real_stft, imag_stft)
150
+ return stft_magnitude
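A quick way to see what these helpers emit is to run them eagerly on a dummy waveform. A sketch, assuming the default Params from params.py (16 kHz audio, 64 mel bands, 0.96 s patches with a 0.48 s hop):

    import numpy as np
    import features as features_lib
    from params import Params

    params = Params()
    waveform = np.zeros(int(params.sample_rate), dtype=np.float32)  # 1 s of silence

    padded = features_lib.pad_waveform(waveform, params)
    log_mel, patches = features_lib.waveform_to_log_mel_spectrogram_patches(padded, params)

    # patches has shape (num_patches, params.patch_frames, params.mel_bands),
    # i.e. (num_patches, 96, 64) with the default hyperparameters.
    print(log_mel.shape, patches.shape)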
inference.py ADDED
@@ -0,0 +1,515 @@
1
+ """
2
+ Audio Classification using YAMNet and Custom Models
3
+ A streamlined tool for classifying audio using pre-trained and custom models.
4
+ """
5
+
6
+ import os
7
+ import argparse
8
+ import logging
9
+ from pathlib import Path
10
+ from typing import Dict, List, Tuple, Optional, Union, Any
11
+ from dataclasses import dataclass, field
12
+
13
+ import numpy as np
14
+ import pandas as pd
15
+ import librosa
16
+ import resampy
17
+ import soundfile as sf
18
+ import tensorflow as tf
19
+ from tensorflow.keras.models import load_model
20
+
21
+
22
+ logging.basicConfig(
23
+ level=logging.INFO,
24
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
25
+ )
26
+ logger = logging.getLogger(__name__)
27
+
28
+ # Suppress TensorFlow warnings
29
+ tf.get_logger().setLevel(logging.ERROR)
30
+ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
31
+
32
+
33
+ @dataclass(frozen=True)
34
+ class YAMNetParams:
35
+ """Parameters for YAMNet model."""
36
+ sample_rate: float = 16000.0
37
+ stft_window_seconds: float = 0.025
38
+ stft_hop_seconds: float = 0.010
39
+ mel_bands: int = 64
40
+ mel_min_hz: float = 125.0
41
+ mel_max_hz: float = 7500.0
42
+ log_offset: float = 0.001
43
+ patch_window_seconds: float = 0.96
44
+ patch_hop_seconds: float = 0.48
45
+ num_classes: int = 521
46
+ conv_padding: str = 'same'
47
+ batchnorm_center: bool = True
48
+ batchnorm_scale: bool = False
49
+ batchnorm_epsilon: float = 1e-4
50
+ classifier_activation: str = 'sigmoid'
51
+ tflite_compatible: bool = True
52
+
53
+ @property
54
+ def patch_frames(self) -> int:
55
+ """Calculate number of frames per patch."""
56
+ return int(round(self.patch_window_seconds / self.stft_hop_seconds))
57
+
58
+ @property
59
+ def patch_bands(self) -> int:
60
+ """Get number of mel bands."""
61
+ return self.mel_bands
62
+
63
+
64
+ @dataclass
65
+ class Config:
66
+ """Configuration for models and processing parameters."""
67
+
68
+ yamnet_model_path: str
69
+ yamnet_classes_path: str
70
+ model_path: Optional[str] = None
71
+ custom_classes_path: Optional[str] = None
72
+ output_dir: str = "results"
73
+ output_file: str = "classification.txt"
74
+
75
+ # Processing parameters
76
+ window_length: int = 10 # seconds
77
+ hop_length: int = 1 # seconds
78
+ custom_weight_factor: float = 5.0
79
+ top_k: int = 10 # Number of top predictions to keep
80
+
81
+ # Exclude certain classes
82
+ excluded_classes: List[str] = field(default_factory=lambda: ["Vehicle"])
83
+
84
+ def __post_init__(self):
85
+ """Convert paths to absolute paths and ensure output directory exists."""
86
+ self.yamnet_model_path = os.path.abspath(self.yamnet_model_path)
87
+ self.yamnet_classes_path = os.path.abspath(self.yamnet_classes_path)
88
+
89
+ if self.model_path:
90
+ self.model_path = os.path.abspath(self.model_path)
91
+
92
+ if self.custom_classes_path:
93
+ self.custom_classes_path = os.path.abspath(self.custom_classes_path)
94
+
95
+ # Create output directory
96
+ os.makedirs(Path(self.output_dir), exist_ok=True)
97
+
98
+ @property
99
+ def output_path(self) -> str:
100
+ """Get full path to output file."""
101
+ return os.path.join(self.output_dir, self.output_file)
102
+
103
+ @classmethod
104
+ def from_args(cls, args: argparse.Namespace) -> 'Config':
105
+ """Create config from command line arguments."""
106
+ output_dir = os.path.dirname(args.output) or "results"
107
+ output_file = os.path.basename(args.output) or "classification.txt"
108
+
109
+ return cls(
110
+ yamnet_model_path=args.yamnet_model,
111
+ yamnet_classes_path=args.yamnet_classes,
112
+ model_path=args.model if os.path.exists(args.model) else None,
113
+ custom_classes_path=args.custom_classes if os.path.exists(args.custom_classes) else None,
114
+ output_dir=output_dir,
115
+ output_file=output_file,
116
+ window_length=args.window,
117
+ hop_length=args.hop,
118
+ custom_weight_factor=args.weight
119
+ )
120
+
121
+
122
+ class AudioClassifier:
123
+ """Audio classification using YAMNet and custom models."""
124
+
125
+ def __init__(self, config: Config):
126
+ """Initialize classifier with configuration."""
127
+ self.config = config
128
+ self.params = YAMNetParams()
129
+
130
+ # Initialize models
131
+ self.yamnet_model = None
132
+ self.model = None
133
+ self.yamnet_classes = []
134
+ self.custom_classes = []
135
+
136
+ # Load models
137
+ self._load_models()
138
+
139
+ def _load_models(self) -> None:
140
+ """Load YAMNet and custom models."""
141
+ # Load YAMNet model
142
+ try:
143
+ from yamnet import yamnet_frames_model, class_names
144
+
145
+ logger.info(f"Loading YAMNet model from {self.config.yamnet_model_path}")
146
+ self.yamnet_model = yamnet_frames_model(self.params)
147
+ self.yamnet_model.load_weights(self.config.yamnet_model_path)
148
+
149
+ logger.info(f"Loading YAMNet classes from {self.config.yamnet_classes_path}")
150
+ self.yamnet_classes = class_names(self.config.yamnet_classes_path)
151
+
152
+ except ImportError:
153
+ logger.error("YAMNet module not found. Please install it or provide correct path.")
154
+ raise
155
+ except Exception as e:
156
+ logger.error(f"Failed to load YAMNet model: {e}")
157
+ raise
158
+
159
+ # Load custom model if available
160
+ if self.config.model_path:
161
+ try:
162
+ logger.info(f"Loading custom model from {self.config.model_path}")
163
+ self.model = load_model(self.config.model_path)
164
+
165
+ if self.config.custom_classes_path:
166
+ logger.info(f"Loading custom classes from {self.config.custom_classes_path}")
167
+ self.custom_classes = np.load(self.config.custom_classes_path, allow_pickle=True)
168
+
169
+ except Exception as e:
170
+ logger.warning(f"Failed to load custom model: {e}")
171
+ logger.warning("Continuing with YAMNet model only.")
172
+ self.model = None
173
+ self.custom_classes = []
174
+
175
+ def classify_file(self, audio_path: str) -> Dict[str, Any]:
176
+ """Classify audio file and return results."""
177
+ logger.info(f"Processing audio file: {audio_path}")
178
+
179
+ # Load audio
180
+ waveform, sr = self._load_audio(audio_path)
181
+
182
+ # Process audio segments
183
+ logger.info("Processing audio segments...")
184
+ segments_results = self._process_audio_segments(waveform, sr)
185
+
186
+ # Aggregate results
187
+ logger.info("Aggregating results...")
188
+ final_results = self._aggregate_results(segments_results)
189
+
190
+ # Save results
191
+ if self.config.output_path:
192
+ self._save_results(final_results)
193
+
194
+ return final_results
195
+
196
+ def _load_audio(self, file_path: str) -> Tuple[np.ndarray, int]:
197
+ """Load and preprocess audio file."""
198
+
199
+ if not os.path.exists(file_path):
200
+ raise FileNotFoundError(f"Audio file not found: {file_path}")
201
+
202
+ # Load audio data
203
+ logger.info(f"Loading audio from {file_path}")
204
+ wav_data, sr = sf.read(file_path, dtype=np.int16)
205
+
206
+ # Convert to float32 in range [-1.0, 1.0]
207
+ waveform = wav_data / 32768.0
208
+ waveform = waveform.astype('float32')
209
+
210
+ # Convert stereo to mono if needed
211
+ if len(waveform.shape) > 1:
212
+ logger.info("Converting stereo audio to mono")
213
+ waveform = np.mean(waveform, axis=1)
214
+
215
+ # Resample if needed
216
+ if sr != self.params.sample_rate:
217
+ logger.info(f"Resampling audio from {sr}Hz to {self.params.sample_rate}Hz")
218
+ waveform = resampy.resample(waveform, sr, self.params.sample_rate)
219
+ sr = int(self.params.sample_rate)
220
+
221
+ return waveform, sr
222
+
223
+ def _process_audio_segments(self, waveform: np.ndarray, sr: int) -> List[Dict[str, Any]]:
224
+ """Process audio in segments."""
225
+ segment_length_samples = int(sr * self.config.window_length)
226
+ hop_length_samples = int(sr * self.config.hop_length)
227
+
228
+ if segment_length_samples <= 0:
229
+ raise ValueError(f"Invalid segment length: {self.config.window_length} seconds")
230
+
231
+ segments_results = []
232
+
233
+ # Process each segment
234
+ total_segments = max(1, (len(waveform) - segment_length_samples + hop_length_samples) // hop_length_samples)
235
+ for i in range(0, len(waveform) - segment_length_samples + 1, hop_length_samples):
236
+ segment_idx = i // hop_length_samples + 1
237
+ logger.debug(f"Processing segment {segment_idx}/{total_segments}")
238
+
239
+ end_idx = min(i + segment_length_samples, len(waveform))
240
+ window = waveform[i:end_idx]
241
+
242
+ # Get YAMNet predictions
243
+ yamnet_predictions = self._get_yamnet_predictions(window)
244
+
245
+ # Get custom model predictions if available
246
+ custom_predictions = None
247
+ if self.model is not None:
248
+ custom_predictions = self._get_custom_predictions(window)
249
+
250
+ # Combine predictions
251
+ combined_results = self._combine_predictions(yamnet_predictions, custom_predictions)
252
+
253
+ # Store results
254
+ segment_result = {
255
+ 'yamnet_predictions': yamnet_predictions,
256
+ 'custom_predictions': custom_predictions,
257
+ 'combined_predictions': combined_results
258
+ }
259
+
260
+ segments_results.append(segment_result)
261
+
262
+ return segments_results
263
+
264
+ def _get_yamnet_predictions(self, audio_segment: np.ndarray) -> Dict[str, float]:
265
+ """Get YAMNet predictions for an audio segment."""
266
+ try:
267
+ scores, embeddings, spectrogram = self.yamnet_model(audio_segment)
268
+ prediction = np.mean(scores, axis=0)
269
+
270
+ # Get top predictions
271
+ top_indices = np.argsort(prediction)[::-1][:self.config.top_k]
272
+ top_labels = [self.yamnet_classes[i] for i in top_indices]
273
+ top_scores = prediction[top_indices]
274
+
275
+ return {label: float(score) for label, score in zip(top_labels, top_scores)}
276
+
277
+ except Exception as e:
278
+ logger.error(f"Error in YAMNet prediction: {e}")
279
+ return {}
280
+
281
+ def _get_custom_predictions(self, audio_segment: np.ndarray) -> Dict[str, float]:
282
+ """Get custom model predictions for an audio segment."""
283
+ try:
284
+ # Get YAMNet embeddings first
285
+ embeddings = self.yamnet_model(audio_segment)[1]
286
+
287
+ # Reshape embeddings for custom model
288
+ embeddings_reshaped = np.reshape(embeddings, (embeddings.shape[0], -1))
289
+
290
+ # Get predictions from custom model
291
+ predictions = self.model.predict(embeddings_reshaped, verbose=0)
292
+
293
+ # Calculate mean prediction over time
294
+ mean_predictions = np.mean(predictions, axis=0)
295
+
296
+ # Get top predictions
297
+ top_indices = np.argsort(mean_predictions)[::-1][:self.config.top_k]
298
+
299
+ # Check if custom classes are available
300
+ if len(self.custom_classes) > 0:
301
+ top_labels = [self.custom_classes[i] for i in top_indices]
302
+ else:
303
+ # Use numeric indices as labels if no class names are available
304
+ top_labels = [f"Class_{i}" for i in top_indices]
305
+
306
+ top_scores = mean_predictions[top_indices]
307
+
308
+ # Normalize scores
309
+ total_score = np.sum(top_scores)
310
+ if total_score > 0:
311
+ top_scores = top_scores / total_score
312
+
313
+ return {label: float(score) for label, score in zip(top_labels, top_scores)}
314
+
315
+ except Exception as e:
316
+ logger.error(f"Error in custom model prediction: {e}")
317
+ return {}
318
+
319
+ def _combine_predictions(
320
+ self,
321
+ yamnet_predictions: Dict[str, float],
322
+ custom_predictions: Optional[Dict[str, float]]
323
+ ) -> Dict[str, float]:
324
+ """Combine predictions from different models."""
325
+ combined = {}
326
+
327
+ # Add custom predictions with weighting
328
+ if custom_predictions:
329
+ for label, score in custom_predictions.items():
330
+ combined[label] = score * self.config.custom_weight_factor
331
+
332
+ # Add YAMNet predictions if not already present or if higher score
333
+ for label, score in yamnet_predictions.items():
334
+ if label not in self.config.excluded_classes:
335
+ if label not in combined or score > combined[label]:
336
+ combined[label] = score
337
+
338
+ return combined
339
+
340
+ def _aggregate_results(self, segments_results: List[Dict[str, Any]]) -> Dict[str, Any]:
341
+ """Aggregate results across all segments."""
342
+ # Initialize aggregated predictions
343
+ aggregated_predictions = {}
344
+
345
+ # Collect all combined predictions, keeping maximum score per label
346
+ for segment in segments_results:
347
+ for label, score in segment['combined_predictions'].items():
348
+ if label in aggregated_predictions:
349
+ aggregated_predictions[label] = max(aggregated_predictions[label], score)
350
+ else:
351
+ aggregated_predictions[label] = score
352
+
353
+ # Process results
354
+ if aggregated_predictions:
355
+ # Get top predictions
356
+ sorted_predictions = sorted(
357
+ aggregated_predictions.items(),
358
+ key=lambda x: x[1],
359
+ reverse=True
360
+ )[:self.config.top_k]
361
+
362
+ # Create a new dictionary with only the top predictions
363
+ top_predictions = {label: score for label, score in sorted_predictions}
364
+
365
+ # Normalize scores
366
+ total_score = sum(top_predictions.values())
367
+ if total_score > 0:
368
+ normalized_predictions = {
369
+ label: score / total_score
370
+ for label, score in top_predictions.items()
371
+ }
372
+ else:
373
+ # Default to equal probabilities if all scores are 0
374
+ normalized_predictions = {
375
+ label: 1.0 / len(top_predictions) if len(top_predictions) > 0 else 0.0
376
+ for label in top_predictions
377
+ }
378
+
379
+ # Find dominant label
380
+ dominant_label, dominant_score = max(normalized_predictions.items(), key=lambda x: x[1])
381
+ dominant_score_percentage = round(dominant_score * 100)
382
+
383
+ # Replace original predictions with normalized ones
384
+ aggregated_predictions = normalized_predictions
385
+ else:
386
+ dominant_label = "Unknown"
387
+ dominant_score = 0
388
+ dominant_score_percentage = 0
389
+
390
+ return {
391
+ 'aggregated_predictions': aggregated_predictions,
392
+ 'dominant_label': dominant_label,
393
+ 'dominant_score': dominant_score,
394
+ 'dominant_score_percentage': dominant_score_percentage
395
+ }
396
+
397
+ def _save_results(self, results: Dict[str, Any]) -> None:
398
+ """Save classification results to file."""
399
+ try:
400
+ with open(self.config.output_path, 'w') as file:
401
+ file.write("Audio Classification Results\n")
402
+ file.write("=========================\n\n")
403
+ file.write(f"Primary Classification: {results['dominant_label']} ({results['dominant_score_percentage']}%)\n\n")
404
+
405
+ # Add detailed breakdown
406
+ file.write("Classification Details:\n")
407
+ file.write("-----------------\n")
408
+ sorted_predictions = sorted(
409
+ results['aggregated_predictions'].items(),
410
+ key=lambda x: x[1],
411
+ reverse=True
412
+ )
413
+
414
+ for label, score in sorted_predictions:
415
+ percentage = round(score * 100)
416
+ file.write(f"{label}: {percentage}%\n")
417
+
418
+ logger.info(f"Results saved to {self.config.output_path}")
419
+
420
+ except Exception as e:
421
+ logger.error(f"Error saving results: {e}")
422
+ raise
423
+
424
+
425
+ def main():
426
+ """Main function to run audio classification."""
427
+ parser = argparse.ArgumentParser(
428
+ description='Audio classification using YAMNet and custom models',
429
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
430
+ )
431
+
432
+ # Required arguments
433
+ parser.add_argument('audio_file', type=str, help='Path to audio file for classification')
434
+
435
+ # Model paths
436
+ parser.add_argument('--yamnet_model', type=str, default='yamnet/yamnet.h5',
437
+ help='Path to YAMNet model weights')
438
+ parser.add_argument('--yamnet_classes', type=str, default='yamnet/yamnet_class_map.csv',
439
+ help='Path to YAMNet class names')
440
+ parser.add_argument('--model', type=str, default='model/model.h5',
441
+ help='Path to custom model (optional)')
442
+ parser.add_argument('--custom_classes', type=str, default='model/model.npy',
443
+ help='Path to custom class names (optional)')
444
+
445
+ # Processing parameters
446
+ parser.add_argument('--window', type=int, default=10,
447
+ help='Window length in seconds')
448
+ parser.add_argument('--hop', type=int, default=1,
449
+ help='Hop length in seconds')
450
+ parser.add_argument('--weight', type=float, default=5.0,
451
+ help='Weighting factor for custom model predictions')
452
+
453
+ # Output options
454
+ parser.add_argument('--output', type=str, default='results/classification.txt',
455
+ help='Path to output file')
456
+
457
+ # Logging options
458
+ parser.add_argument('--verbose', action='store_true',
459
+ help='Enable verbose output')
460
+ parser.add_argument('--debug', action='store_true',
461
+ help='Enable debug logging')
462
+
463
+ args = parser.parse_args()
464
+
465
+ # Configure logging
466
+ if args.debug:
467
+ logger.setLevel(logging.DEBUG)
468
+ elif args.verbose:
469
+ logger.setLevel(logging.INFO)
470
+ else:
471
+ logger.setLevel(logging.WARNING)
472
+
473
+ try:
474
+ # Create configuration
475
+ config = Config.from_args(args)
476
+
477
+ # Create classifier
478
+ classifier = AudioClassifier(config)
479
+
480
+ # Process audio file
481
+ results = classifier.classify_file(args.audio_file)
482
+
483
+ # Print results
484
+ print("\nAudio Classification Results")
485
+ print("=========================")
486
+ print(f"\nPrimary Classification: {results['dominant_label']} ({results['dominant_score_percentage']}%)")
487
+
488
+ if args.verbose:
489
+ print("\nTop 10 Predictions:")
490
+ print("-----------------")
491
+ sorted_predictions = sorted(
492
+ results['aggregated_predictions'].items(),
493
+ key=lambda x: x[1],
494
+ reverse=True
495
+ )
496
+
497
+ for label, score in sorted_predictions:
498
+ percentage = round(score * 100)
499
+ print(f"{label}: {percentage}%")
500
+
501
+ print(f"\nFull results saved to: {config.output_path}")
502
+
503
+ except Exception as e:
504
+ logger.error(f"Error: {e}")
505
+ if args.debug:
506
+ import traceback
507
+ traceback.print_exc()
508
+ return 1
509
+
510
+ return 0
511
+
512
+
513
+ if __name__ == '__main__':
514
+ import sys
515
+ sys.exit(main())
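To make the fusion rule in _combine_predictions concrete: custom-model scores are boosted by custom_weight_factor (5.0 by default), and a YAMNet score only takes a label when that label is not excluded and the score beats whatever is already there. A standalone toy illustration of the rule (the scores below are made up, not model output):

    custom_weight_factor = 5.0
    excluded_classes = {"Vehicle"}

    yamnet = {"Speech": 0.60, "Vehicle": 0.30, "Dog": 0.05}
    custom = {"Dog": 0.08, "Siren": 0.02}

    # Weighted custom scores first, then YAMNet scores that win or are new.
    combined = {label: score * custom_weight_factor for label, score in custom.items()}
    for label, score in yamnet.items():
        if label not in excluded_classes and score > combined.get(label, float("-inf")):
            combined[label] = score

    # combined -> {"Dog": 0.40, "Siren": 0.10, "Speech": 0.60}; "Vehicle" is dropped.
    print(combined)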
model/model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fad065a5b7abb72dfe41e9fe1d5bb238c43b174559a9f94d62da217d41f44b6b
+ size 12671536
model/model.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0139d6fdecc7e44160ddbe67127e9b24760687be87ca6a030b9ffbccc2dbe126
+ size 640
params.py ADDED
@@ -0,0 +1,36 @@
+ """Hyperparameters for YAMNet."""
+
+ from dataclasses import dataclass
+
+ # The following hyperparameters (except patch_hop_seconds) were used to train YAMNet,
+ # so expect some variability in performance if you change these. The patch hop can
+ # be changed arbitrarily: a smaller hop should give you more patches from the same
+ # clip and possibly better performance at a larger computational cost.
+ @dataclass(frozen=True)  # Instances of this class are immutable.
+ class Params:
+     sample_rate: float = 16000.0
+     stft_window_seconds: float = 0.025
+     stft_hop_seconds: float = 0.010
+     mel_bands: int = 64
+     mel_min_hz: float = 125.0
+     mel_max_hz: float = 7500.0
+     log_offset: float = 0.001
+     patch_window_seconds: float = 0.96
+     patch_hop_seconds: float = 0.48
+
+     @property
+     def patch_frames(self):
+         return int(round(self.patch_window_seconds / self.stft_hop_seconds))
+
+     @property
+     def patch_bands(self):
+         return self.mel_bands
+
+     num_classes: int = 521
+     conv_padding: str = 'same'
+     batchnorm_center: bool = True
+     batchnorm_scale: bool = False
+     batchnorm_epsilon: float = 1e-4
+     classifier_activation: str = 'sigmoid'
+
+     tflite_compatible: bool = True
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ librosa==0.11.0
+ numpy==1.26.4
+ pandas==2.2.3
+ tensorflow==2.9.0
+ resampy==0.4.3
+ ffmpeg
train.py ADDED
@@ -0,0 +1,514 @@
1
+ """
2
+ Audio Classification System
3
+
4
+ This module trains a neural network model on audio data using YAMNet embeddings.
5
+ It extracts features from audio files and trains a classifier to recognize audio classes.
6
+
7
+ Usage:
8
+ python train.py --data_path <path_to_data> --model_name <model_name>
9
+
10
+ """
11
+
12
+ import os
13
+ import sys
14
+ import argparse
15
+ import logging
16
+ from pathlib import Path
17
+ from typing import Tuple, List, Dict, Optional, Any
18
+
19
+ import numpy as np
20
+ import pandas as pd
21
+ import tensorflow as tf
22
+ import librosa
23
+ from tqdm import tqdm
24
+ from sklearn.preprocessing import LabelBinarizer
25
+ from sklearn.utils import shuffle
26
+
27
+
28
+ logging.basicConfig(
29
+ level=logging.INFO,
30
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
31
+ handlers=[
32
+ logging.StreamHandler()
33
+ ]
34
+ )
35
+ logger = logging.getLogger(__name__)
36
+
37
+ # Default configuration
38
+ DEFAULT_CONFIG = {
39
+ 'yamnet_path': 'yamnet/yamnet.h5',
40
+ 'classes_path': 'yamnet/yamnet_class_map.csv',
41
+ 'sample_rate': 16000,
42
+ 'epochs': 100,
43
+ 'batch_size': 32,
44
+ 'learning_rate': 0.001,
45
+ 'num_hidden': 1024,
46
+ 'hidden_layer_size': 512,
47
+ 'num_extra_layers': 1,
48
+ 'dropout_rate': 0.3,
49
+ 'regularization': 0.01,
50
+ 'patience': 10,
51
+ 'validation_split': 0.2,
52
+ 'model_folder': 'model'
53
+ }
54
+
55
+
56
+ class Configuration:
57
+ """Handles configuration for the audio classification system."""
58
+
59
+ def __init__(self, custom_config: Optional[Dict[str, Any]] = None):
60
+ """
61
+ Initialize configuration handler.
62
+
63
+ Args:
64
+ custom_config: Custom configuration to override defaults
65
+ """
66
+ self.config = DEFAULT_CONFIG.copy()
67
+ if custom_config:
68
+ self.config.update(custom_config)
69
+
70
+ def get(self, key: str, default: Any = None) -> Any:
71
+ return self.config.get(key, default)
72
+
73
+ def set(self, key: str, value: Any) -> None:
74
+ self.config[key] = value
75
+
76
+ def __getitem__(self, key: str) -> Any:
77
+ return self.config[key]
78
+
79
+
80
+ class ClassMap:
81
+ """Handles audio class mapping and persistence."""
82
+
83
+ def __init__(self, config: Configuration):
84
+ """
85
+ Initialize class map.
86
+
87
+ Args:
88
+ config: Configuration handler
89
+ """
90
+ self.config = config
91
+ self.classes_path = config['classes_path']
92
+ self._ensure_classes_file_exists()
93
+
94
+ def _ensure_classes_file_exists(self) -> None:
95
+ """Ensure the classes mapping file exists."""
96
+ if not os.path.exists(self.classes_path):
97
+ logger.info(f"Class map file not found: {self.classes_path}. Creating a new one.")
98
+
99
+ pd.DataFrame({"display_name": [], "index": [], "mid": []}).to_csv(
100
+ self.classes_path, index=False
101
+ )
102
+
103
+ def load_yamnet_classes(self) -> np.ndarray:
104
+ """Load classes from YAMNet class map CSV file."""
105
+ try:
106
+ df = pd.read_csv(self.classes_path)
107
+ return df["display_name"].values
108
+ except Exception as e:
109
+ logger.error(f"Error loading classes: {str(e)}")
110
+ return np.array([])
111
+
112
+ def update_classes(self, data_path: str) -> List[str]:
113
+ """
114
+ Update classes based on directory structure.
115
+
116
+ Args:
117
+ data_path: Path to data directory
118
+
119
+ Returns:
120
+ List of all class names
121
+ """
122
+ try:
123
+ # Load existing classes mapping
124
+ existing_classes_df = pd.read_csv(self.classes_path)
125
+ existing_classes_set = set(existing_classes_df['display_name'])
126
+
127
+ # Find new classes
128
+ new_classes = []
129
+ for cls in sorted(os.listdir(data_path)):
130
+ class_path = os.path.join(data_path, cls)
131
+ if os.path.isdir(class_path) and cls not in existing_classes_set:
132
+ new_classes.append(cls)
133
+
134
+ # Append new classes to the existing classes dataframe
135
+ if new_classes:
136
+ logger.info(f"Adding {len(new_classes)} new classes: {', '.join(new_classes)}")
137
+ new_classes_df = pd.DataFrame({
138
+ 'display_name': new_classes,
139
+ 'index': [''] * len(new_classes),
140
+ 'mid': [''] * len(new_classes)
141
+ })
142
+ updated_classes_df = pd.concat([existing_classes_df, new_classes_df], ignore_index=True)
143
+ updated_classes_df.to_csv(self.classes_path, index=False)
144
+
145
+ # Return all classes from data directory
146
+ return [cls for cls in sorted(os.listdir(data_path))
147
+ if os.path.isdir(os.path.join(data_path, cls))]
148
+
149
+ except Exception as e:
150
+ logger.error(f"Error updating classes: {str(e)}")
151
+ raise
152
+
153
+
154
+ class FeatureExtractor:
155
+ """Extracts features from audio files using YAMNet."""
156
+
157
+ def __init__(self, config: Configuration):
158
+ """
159
+ Initialize feature extractor.
160
+
161
+ Args:
162
+ config: Configuration handler
163
+ """
164
+ self.config = config
165
+ self.yamnet_model = self._load_yamnet_model()
166
+
167
+ def _load_yamnet_model(self):
168
+ """Load YAMNet model for feature extraction."""
169
+ try:
170
+ logger.info("Loading YAMNet model...")
171
+ # Import here to avoid circular imports
172
+ from yamnet import yamnet_frames_model
173
+ from params import Params
174
+
175
+ model = yamnet_frames_model(Params())
176
+ model.load_weights(self.config['yamnet_path'])
177
+ return model
178
+ except Exception as e:
179
+ logger.error(f"Error loading YAMNet model: {str(e)}")
180
+ raise
181
+
182
+ def extract_features(self, audio_path: str) -> np.ndarray:
183
+ """
184
+ Extract features from an audio file using YAMNet.
185
+
186
+ Args:
187
+ audio_path: Path to audio file
188
+
189
+ Returns:
190
+ Numpy array of extracted features
191
+ """
192
+ try:
193
+ # Load audio file
194
+ wav, _ = librosa.load(
195
+ audio_path,
196
+ sr=self.config['sample_rate'],
197
+ mono=True
198
+ )
199
+ wav = wav.astype(np.float32)
200
+
201
+ if len(wav) == 0:
202
+ logger.warning(f"Warning: Empty audio file: {audio_path}")
203
+ return np.array([])
204
+
205
+ # Extract embeddings using YAMNet
206
+ _, embeddings, _ = self.yamnet_model(wav)
207
+ return embeddings.numpy()
208
+
209
+ except Exception as e:
210
+ logger.error(f"Error extracting features from {audio_path}: {str(e)}")
211
+ return np.array([])
212
+
213
+
214
+ class DatasetLoader:
215
+ """Creates a dataset from audio files."""
216
+
217
+ def __init__(self, config: Configuration, feature_extractor: FeatureExtractor):
218
+ """
219
+ Initialize dataset creator.
220
+
221
+ Args:
222
+ config: Configuration handler
223
+ feature_extractor: Feature extractor
224
+ """
225
+ self.config = config
226
+ self.feature_extractor = feature_extractor
227
+
228
+ def create_dataset(self, data_path: str, classes: List[str]) -> Tuple[np.ndarray, np.ndarray]:
229
+ """
230
+ Create a dataset from audio files in the specified path.
231
+
232
+ Args:
233
+ data_path: Path to the directory containing audio files organized in class folders
234
+ classes: List of class names
235
+
236
+ Returns:
237
+ samples: Numpy array of audio features
238
+ labels: Numpy array of corresponding labels
239
+ """
240
+ samples, labels = [], []
241
+
242
+ for cls in classes:
243
+ class_path = os.path.join(data_path, cls)
244
+ if not os.path.isdir(class_path):
245
+ continue
246
+
247
+ logger.info(f"Processing class: {cls}")
248
+ audio_files = os.listdir(class_path)
249
+
250
+ for sound in tqdm(audio_files, desc=f"Processing {cls}"):
251
+ audio_path = os.path.join(class_path, sound)
252
+ embeddings = self.feature_extractor.extract_features(audio_path)
253
+
254
+ if len(embeddings) == 0:
255
+ continue
256
+
257
+ # Store each embedding frame with its label
258
+ for embedding in embeddings:
259
+ samples.append(embedding)
260
+ labels.append(cls)
261
+
262
+ # Convert to numpy arrays
263
+ if not samples:
264
+ error_msg = "No valid audio samples were processed!"
265
+ logger.error(error_msg)
266
+ raise ValueError(error_msg)
267
+
268
+ samples = np.asarray(samples)
269
+ labels = np.asarray(labels)
270
+
271
+ logger.info(f"Created dataset with {len(samples)} samples across {len(set(labels))} classes")
272
+ return samples, labels
273
+
274
+
275
+ class ModelBuilder:
276
+ """Builds and trains neural network models for audio classification."""
277
+
278
+ def __init__(self, config: Configuration):
279
+ """
280
+ Initialize model builder.
281
+
282
+ Args:
283
+ config: Configuration handler
284
+ """
285
+ self.config = config
286
+
287
+ def build_model(self, num_classes: int) -> tf.keras.Model:
288
+ """
289
+ Build a neural network model for audio classification.
290
+
291
+ Args:
292
+ num_classes: Number of output classes
293
+
294
+ Returns:
295
+ Keras Model object
296
+ """
297
+ # Input layer (YAMNet embeddings are 1024-dimensional)
298
+ inputs = tf.keras.layers.Input(shape=(1024,))
299
+
300
+ # First hidden layer with L2 regularization
301
+ x = tf.keras.layers.Dense(
302
+ self.config['num_hidden'],
303
+ activation='relu',
304
+ kernel_regularizer=tf.keras.regularizers.l2(self.config['regularization'])
305
+ )(inputs)
306
+ x = tf.keras.layers.BatchNormalization()(x)
307
+ x = tf.keras.layers.Dropout(self.config['dropout_rate'])(x)
308
+
309
+ # Additional hidden layers
310
+ for i in range(self.config['num_extra_layers']):
311
+ layer_size = self.config['hidden_layer_size'] // (i+1)
312
+ x = tf.keras.layers.Dense(
313
+ layer_size,
314
+ activation='relu',
315
+ kernel_regularizer=tf.keras.regularizers.l2(self.config['regularization'])
316
+ )(x)
317
+ x = tf.keras.layers.BatchNormalization()(x)
318
+ x = tf.keras.layers.Dropout(self.config['dropout_rate'])(x)
319
+
320
+ # Output layer
321
+ outputs = tf.keras.layers.Dense(num_classes, activation='softmax')(x)
322
+
323
+ # Create and return model
324
+ model = tf.keras.Model(inputs=inputs, outputs=outputs)
325
+ return model
326
+
327
+ def _create_callbacks(self, model_path: str) -> List[tf.keras.callbacks.Callback]:
328
+ """
329
+ Create callbacks for model training.
330
+
331
+ Args:
332
+ model_path: Path to save the model
333
+
334
+ Returns:
335
+ List of callbacks
336
+ """
337
+ # Create tensorboard callback
338
+ log_dir = Path(f"logs/{os.path.basename(model_path)}")
339
+ log_dir.mkdir(parents=True, exist_ok=True)
340
+
341
+ tensorboard = tf.keras.callbacks.TensorBoard(
342
+ log_dir=log_dir,
343
+ histogram_freq=1
344
+ )
345
+
346
+ # Early stopping callback
347
+ early_stopping = tf.keras.callbacks.EarlyStopping(
348
+ monitor='val_accuracy',
349
+ patience=self.config['patience'],
350
+ restore_best_weights=True,
351
+ verbose=1
352
+ )
353
+
354
+ # Learning rate reduction callback
355
+ reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
356
+ monitor='val_loss',
357
+ factor=0.5,
358
+ patience=5,
359
+ min_lr=0.00001,
360
+ verbose=1
361
+ )
362
+
363
+ return [early_stopping, reduce_lr, tensorboard]
364
+
365
+ def train_model(self, X: np.ndarray, y: np.ndarray, model_name: str) -> Tuple[tf.keras.Model, LabelBinarizer]:
366
+ """
367
+ Train a model on the provided data.
368
+
369
+ Args:
370
+ X: Input features
371
+ y: Target labels
372
+ model_name: Name of the model
373
+
374
+ Returns:
375
+ Tuple of (trained model, label encoder)
376
+ """
377
+ # Encode the labels (one-hot encoding)
378
+ encoder = LabelBinarizer()
379
+ encoded_labels = encoder.fit_transform(y)
380
+ num_classes = len(encoder.classes_)
381
+
382
+ logger.info(f"Training model with {num_classes} classes: {', '.join(encoder.classes_)}")
383
+
384
+ # Create model
385
+ model = self.build_model(num_classes=num_classes)
386
+
387
+ # Print model summary
388
+ model.summary()
389
+
390
+ # Compile model
391
+ optimizer = tf.keras.optimizers.Adam(learning_rate=self.config['learning_rate'])
392
+ model.compile(
393
+ optimizer=optimizer,
394
+ loss=tf.keras.losses.CategoricalCrossentropy(),
395
+ metrics=['accuracy']
396
+ )
397
+
398
+ model_folder = os.path.join(self.config['model_folder'])
399
+ os.makedirs(model_folder, exist_ok=True)
400
+
401
+ model_path = os.path.join(model_folder, model_name)
402
+
403
+ callbacks = self._create_callbacks(model_path)
404
+
405
+ # Train the model
406
+ history = model.fit(
407
+ X, encoded_labels,
408
+ epochs=self.config['epochs'],
409
+ batch_size=self.config['batch_size'],
410
+ validation_split=self.config['validation_split'],
411
+ callbacks=callbacks,
412
+ verbose=1
413
+ )
414
+
415
+ # Save the model and class names
416
+ model.save(f"{model_path}.h5")
417
+ np.save(f"{model_path}_classes.npy", encoder.classes_)
418
+
419
+ # Save training history
420
+ hist_df = pd.DataFrame(history.history)
421
+ hist_df.to_csv(f"{model_path}_history.csv", index=False)
422
+
423
+ logger.info(f"Model saved as {model_path}.h5")
424
+ logger.info(f"Class names saved as {model_path}_classes.npy")
425
+
426
+ return model, encoder
427
+
428
+
429
+ def parse_arguments() -> argparse.Namespace:
430
+ """Parse command-line arguments."""
431
+ parser = argparse.ArgumentParser(description="Train an audio classification model")
432
+ parser.add_argument('--data_path', type=str, required=True,
433
+ help='Path to the directory containing audio files')
434
+ parser.add_argument('--model_name', type=str, required=True,
435
+ help='Name for the saved model')
436
+ parser.add_argument('--config', type=str,
437
+ help='Path to config JSON file (optional)')
438
+ parser.add_argument('--epochs', type=int, default=DEFAULT_CONFIG['epochs'],
439
+ help='Number of training epochs')
440
+ parser.add_argument('--batch_size', type=int, default=DEFAULT_CONFIG['batch_size'],
441
+ help='Batch size for training')
442
+ parser.add_argument('--learning_rate', type=float, default=DEFAULT_CONFIG['learning_rate'],
443
+ help='Initial learning rate')
444
+ parser.add_argument('--model_folder', type=str, default=DEFAULT_CONFIG['model_folder'],
445
+ help='Folder to save the model')
446
+
447
+ return parser.parse_args()
448
+
449
+
450
+ def load_custom_config(config_path: Optional[str]) -> Dict[str, Any]:
451
+ """Load custom configuration from a JSON file."""
452
+ if not config_path:
453
+ return {}
454
+
455
+ try:
456
+ import json
457
+ with open(config_path, 'r') as f:
458
+ return json.load(f)
459
+ except Exception as e:
460
+ logger.error(f"Error loading config file: {str(e)}")
461
+ return {}
462
+
463
+
464
+ def main():
465
+ """Main function to run the script."""
466
+ try:
467
+
468
+ args = parse_arguments()
469
+
470
+ # Load custom configuration
471
+ custom_config = load_custom_config(args.config)
472
+
473
+
474
+ custom_config.update({
475
+ 'epochs': args.epochs,
476
+ 'batch_size': args.batch_size,
477
+ 'learning_rate': args.learning_rate,
478
+ 'model_folder': args.model_folder
479
+ })
480
+
481
+ # Create configuration handler
482
+ config = Configuration(custom_config)
483
+
484
+ logger.info(f"Data path: {args.data_path}")
485
+ logger.info(f"Model name: {args.model_name}")
486
+ logger.info(f"Model folder: {config['model_folder']}")
487
+
488
+ # Initialize components
489
+ class_map = ClassMap(config)
490
+ feature_extractor = FeatureExtractor(config)
491
+ dataset_creator = DatasetLoader(config, feature_extractor)
492
+ model_builder = ModelBuilder(config)
493
+
494
+ # Update classes and get class list
495
+ classes = class_map.update_classes(args.data_path)
496
+
497
+ # Create dataset
498
+ samples, labels = dataset_creator.create_dataset(args.data_path, classes)
499
+
500
+ # Shuffle the data for better training
501
+ samples, labels = shuffle(samples, labels, random_state=42)
502
+
503
+ # Train model
504
+ model, encoder = model_builder.train_model(samples, labels, args.model_name)
505
+
506
+ logger.info("Training completed successfully!")
507
+
508
+ except Exception as e:
509
+ logger.error(f"Error during execution: {str(e)}", exc_info=True)
510
+ sys.exit(1)
511
+
512
+
513
+ if __name__ == "__main__":
514
+ main()
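Once training finishes, the saved artifacts can be reloaded the same way inference.py consumes them. One caveat: train_model writes the label array to <model_name>_classes.npy, while inference.py defaults to model/model.npy, so the file has to be renamed or --custom_classes pointed at it. A sketch, assuming a model trained with --model_name model into the default model/ folder:

    import numpy as np
    from tensorflow.keras.models import load_model

    model = load_model("model/model.h5")
    classes = np.load("model/model_classes.npy", allow_pickle=True)

    # The classifier head takes 1024-dimensional YAMNet embeddings as input.
    print(model.input_shape, classes)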
yamnet.py ADDED
@@ -0,0 +1,164 @@
1
+ """Core model definition of YAMNet."""
2
+ from tensorflow.keras.models import load_model
3
+
4
+ import csv
5
+
6
+ import numpy as np
7
+ import tensorflow as tf
8
+ from tensorflow.keras import Model, layers
9
+
10
+ import features as features_lib
11
+
12
+
13
+ def _batch_norm(name, params):
14
+ def _bn_layer(layer_input):
15
+ return layers.BatchNormalization(
16
+ name=name,
17
+ center=params.batchnorm_center,
18
+ scale=params.batchnorm_scale,
19
+ epsilon=params.batchnorm_epsilon)(layer_input)
20
+
21
+ return _bn_layer
22
+
23
+
24
+ def _conv(name, kernel, stride, filters, params):
25
+ def _conv_layer(layer_input):
26
+ output = layers.Conv2D(name='{}/conv'.format(name),
27
+ filters=filters,
28
+ kernel_size=kernel,
29
+ strides=stride,
30
+ padding=params.conv_padding,
31
+ use_bias=False,
32
+ activation=None)(layer_input)
33
+ output = _batch_norm('{}/conv/bn'.format(name), params)(output)
34
+ output = layers.ReLU(name='{}/relu'.format(name))(output)
35
+ return output
36
+
37
+ return _conv_layer
38
+
39
+
40
+ def _separable_conv(name, kernel, stride, filters, params):
41
+ def _separable_conv_layer(layer_input):
42
+ output = layers.DepthwiseConv2D(name='{}/depthwise_conv'.format(name),
43
+ kernel_size=kernel,
44
+ strides=stride,
45
+ depth_multiplier=1,
46
+ padding=params.conv_padding,
47
+ use_bias=False,
48
+ activation=None)(layer_input)
49
+ output = _batch_norm('{}/depthwise_conv/bn'.format(name), params)(output)
50
+ output = layers.ReLU(name='{}/depthwise_conv/relu'.format(name))(output)
51
+ output = layers.Conv2D(name='{}/pointwise_conv'.format(name),
52
+ filters=filters,
53
+ kernel_size=(1, 1),
54
+ strides=1,
55
+ padding=params.conv_padding,
56
+ use_bias=False,
57
+ activation=None)(output)
58
+ output = _batch_norm('{}/pointwise_conv/bn'.format(name), params)(output)
59
+ output = layers.ReLU(name='{}/pointwise_conv/relu'.format(name))(output)
60
+ return output
61
+
62
+ return _separable_conv_layer
63
+
64
+
65
+ _YAMNET_LAYER_DEFS = [
66
+ # (layer_function, kernel, stride, num_filters)
67
+ (_conv, [3, 3], 2, 32),
68
+ (_separable_conv, [3, 3], 1, 64),
69
+ (_separable_conv, [3, 3], 2, 128),
70
+ (_separable_conv, [3, 3], 1, 128),
71
+ (_separable_conv, [3, 3], 2, 256),
72
+ (_separable_conv, [3, 3], 1, 256),
73
+ (_separable_conv, [3, 3], 2, 512),
74
+ (_separable_conv, [3, 3], 1, 512),
75
+ (_separable_conv, [3, 3], 1, 512),
76
+ (_separable_conv, [3, 3], 1, 512),
77
+ (_separable_conv, [3, 3], 1, 512),
78
+ (_separable_conv, [3, 3], 1, 512),
79
+ (_separable_conv, [3, 3], 2, 1024),
80
+ (_separable_conv, [3, 3], 1, 1024)
81
+ ]
82
+
83
+
84
+ def yamnet(features, params):
85
+ """Define the core YAMNet mode in Keras."""
86
+ net = layers.Reshape(
87
+ (params.patch_frames, params.patch_bands, 1),
88
+ input_shape=(params.patch_frames, params.patch_bands))(features)
89
+ for (i, (layer_fun, kernel, stride, filters)) in enumerate(_YAMNET_LAYER_DEFS):
90
+ net = layer_fun('layer{}'.format(i + 1), kernel, stride, filters, params)(net)
91
+ embeddings = layers.GlobalAveragePooling2D()(net)
92
+ logits = layers.Dense(units=params.num_classes, use_bias=True)(embeddings)
93
+ predictions = layers.Activation(activation=params.classifier_activation)(logits)
94
+ return predictions, embeddings
95
+
96
+
97
+ def yamnet_frames_model(params):
98
+ """Defines the YAMNet waveform-to-class-scores model.
99
+
100
+ Args:
101
+ params: An instance of Params containing hyperparameters.
102
+
103
+ Returns:
104
+ A model accepting (num_samples,) waveform input and emitting:
105
+ - predictions: (num_patches, num_classes) matrix of class scores per time frame
106
+ - embeddings: (num_patches, embedding size) matrix of embeddings per time frame
107
+ - log_mel_spectrogram: (num_spectrogram_frames, num_mel_bins) spectrogram feature matrix
108
+ """
109
+ waveform = layers.Input(batch_shape=(None,), dtype=tf.float32)
110
+ waveform_padded = features_lib.pad_waveform(waveform, params)
111
+ log_mel_spectrogram, features = features_lib.waveform_to_log_mel_spectrogram_patches(
112
+ waveform_padded, params)
113
+ predictions, embeddings = yamnet(features, params)
114
+ frames_model = Model(
115
+ name='yamnet_frames', inputs=waveform,
116
+ outputs=[predictions, embeddings, log_mel_spectrogram])
117
+ return frames_model
118
+
119
+
120
+ def yamnet_transfer(features, params):
121
+ net = layers.Reshape(
122
+ (params.patch_frames, params.patch_bands, 1),
123
+ input_shape=(params.patch_frames, params.patch_bands))(features)
124
+ for (i, (layer_fun, kernel, stride, filters)) in enumerate(_YAMNET_LAYER_DEFS):
125
+ net = layer_fun('layer{}'.format(i + 1), kernel, stride, filters, params)(net)
126
+ embeddings = layers.GlobalAveragePooling2D()(net)
127
+ return embeddings
128
+
129
+
130
+ def yamnet_frames_model_transfer(params, last_layers):
131
+ """Defines the YAMNet waveform-to-class-scores model.
132
+
133
+ Args:
134
+ params: An instance of Params containing hyperparameters.
135
+ last_layers: Path to the classifier model.
136
+ Returns:
137
+ A model accepting (num_samples,) waveform input and emitting:
138
+ - predictions: (num_patches, num_classes) matrix of class scores per time frame
139
+ - embeddings: (num_patches, embedding size) matrix of embeddings per time frame
140
+ """
141
+
142
+ waveform = layers.Input(batch_shape=(None,), dtype=tf.float32)
143
+ waveform_padded = features_lib.pad_waveform(waveform, params)
144
+ _, features = features_lib.waveform_to_log_mel_spectrogram_patches(
145
+ waveform_padded, params)
146
+ embeddings = yamnet_transfer(features, params)
147
+ prediction = embeddings
148
+ last_layers = load_model(last_layers)
149
+ for layer in last_layers.layers[1:]:
150
+ prediction = layer(prediction)
151
+ frames_model = Model(
152
+ name='yamnet_frames', inputs=waveform,
153
+ outputs=[prediction, embeddings])
154
+ return frames_model
155
+
156
+
157
+ def class_names(class_map_csv):
158
+ """Read the class name definition file and return a list of strings."""
159
+ if tf.is_tensor(class_map_csv):
160
+ class_map_csv = class_map_csv.numpy()
161
+ with open(class_map_csv) as csv_file:
162
+ reader = csv.reader(csv_file)
163
+ next(reader) # Skip header
164
+ return np.array([display_name for (_, _, display_name) in reader])
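For orientation, the waveform-to-scores path defined above can be exercised directly; the sketch below mirrors the checks in yamnet_test.py further down. The file paths assume the layout added in this commit, and the 3-second 440 Hz test tone is illustrative, not part of this commit's code:

    import numpy as np

    import params
    import yamnet

    # Illustrative usage; paths assume the yamnet/ assets added in this commit.
    p = params.Params()
    model = yamnet.yamnet_frames_model(p)
    model.load_weights('yamnet/yamnet.h5')
    names = yamnet.class_names('yamnet/yamnet_class_map.csv')

    # Three seconds of a 440 Hz tone as a stand-in waveform (float32, at p.sample_rate).
    waveform = np.sin(2 * np.pi * 440 * np.arange(0, 3, 1 / p.sample_rate)).astype(np.float32)

    scores, embeddings, log_mel = model(waveform)   # per-patch outputs
    clip_scores = np.mean(scores, axis=0)           # average scores over patches
    for i in np.argsort(clip_scores)[-5:][::-1]:    # top-5 classes for the clip
        print(names[i], float(clip_scores[i]))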
yamnet/yamnet.h5 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13c3308955bbfaef262f175ac9c40e47b134573a93984f009220dd7cc12a1744
3
+ size 15296092
yamnet/yamnet_class_map.csv ADDED
@@ -0,0 +1,526 @@
1
+ index,mid,display_name
2
+ 0.0,/m/09x0r,Speech
3
+ 1.0,/m/0ytgt,"Child speech, kid speaking"
4
+ 2.0,/m/01h8n0,Conversation
5
+ 3.0,/m/02qldy,"Narration, monologue"
6
+ 4.0,/m/0261r1,Babbling
7
+ 5.0,/m/0brhx,Speech synthesizer
8
+ 6.0,/m/07p6fty,Shout
9
+ 7.0,/m/07q4ntr,Bellow
10
+ 8.0,/m/07rwj3x,Whoop
11
+ 9.0,/m/07sr1lc,Yell
12
+ 10.0,/t/dd00135,Children shouting
13
+ 11.0,/m/03qc9zr,Screaming
14
+ 12.0,/m/02rtxlg,Whispering
15
+ 13.0,/m/01j3sz,Laughter
16
+ 14.0,/t/dd00001,Baby laughter
17
+ 15.0,/m/07r660_,Giggle
18
+ 16.0,/m/07s04w4,Snicker
19
+ 17.0,/m/07sq110,Belly laugh
20
+ 18.0,/m/07rgt08,"Chuckle, chortle"
21
+ 19.0,/m/0463cq4,"Crying, sobbing"
22
+ 20.0,/t/dd00002,"Baby cry, infant cry"
23
+ 21.0,/m/07qz6j3,Whimper
24
+ 22.0,/m/07qw_06,"Wail, moan"
25
+ 23.0,/m/07plz5l,Sigh
26
+ 24.0,/m/015lz1,Singing
27
+ 25.0,/m/0l14jd,Choir
28
+ 26.0,/m/01swy6,Yodeling
29
+ 27.0,/m/02bk07,Chant
30
+ 28.0,/m/01c194,Mantra
31
+ 29.0,/t/dd00005,Child singing
32
+ 30.0,/t/dd00006,Synthetic singing
33
+ 31.0,/m/06bxc,Rapping
34
+ 32.0,/m/02fxyj,Humming
35
+ 33.0,/m/07s2xch,Groan
36
+ 34.0,/m/07r4k75,Grunt
37
+ 35.0,/m/01w250,Whistling
38
+ 36.0,/m/0lyf6,Breathing
39
+ 37.0,/m/07mzm6,Wheeze
40
+ 38.0,/m/01d3sd,Snoring
41
+ 39.0,/m/07s0dtb,Gasp
42
+ 40.0,/m/07pyy8b,Pant
43
+ 41.0,/m/07q0yl5,Snort
44
+ 42.0,/m/01b_21,Cough
45
+ 43.0,/m/0dl9sf8,Throat clearing
46
+ 44.0,/m/01hsr_,Sneeze
47
+ 45.0,/m/07ppn3j,Sniff
48
+ 46.0,/m/06h7j,Run
49
+ 47.0,/m/07qv_x_,Shuffle
50
+ 48.0,/m/07pbtc8,"Walk, footsteps"
51
+ 49.0,/m/03cczk,"Chewing, mastication"
52
+ 50.0,/m/07pdhp0,Biting
53
+ 51.0,/m/0939n_,Gargling
54
+ 52.0,/m/01g90h,Stomach rumble
55
+ 53.0,/m/03q5_w,"Burping, eructation"
56
+ 54.0,/m/02p3nc,Hiccup
57
+ 55.0,/m/02_nn,Fart
58
+ 56.0,/m/0k65p,Hands
59
+ 57.0,/m/025_jnm,Finger snapping
60
+ 58.0,/m/0l15bq,Clapping
61
+ 59.0,/m/01jg02,"Heart sounds, heartbeat"
62
+ 60.0,/m/01jg1z,Heart murmur
63
+ 61.0,/m/053hz1,Cheering
64
+ 62.0,/m/028ght,Applause
65
+ 63.0,/m/07rkbfh,Chatter
66
+ 64.0,/m/03qtwd,Crowd
67
+ 65.0,/m/07qfr4h,"Hubbub, speech noise, speech babble"
68
+ 66.0,/t/dd00013,Children playing
69
+ 67.0,/m/0jbk,Animal
70
+ 68.0,/m/068hy,"Domestic animals, pets"
71
+ 69.0,/m/0bt9lr,Dog
72
+ 70.0,/m/05tny_,Bark
73
+ 71.0,/m/07r_k2n,Yip
74
+ 72.0,/m/07qf0zm,Howl
75
+ 73.0,/m/07rc7d9,Bow-wow
76
+ 74.0,/m/0ghcn6,Growling
77
+ 75.0,/t/dd00136,Whimper (dog)
78
+ 76.0,/m/01yrx,Cat
79
+ 77.0,/m/02yds9,Purr
80
+ 78.0,/m/07qrkrw,Meow
81
+ 79.0,/m/07rjwbb,Hiss
82
+ 80.0,/m/07r81j2,Caterwaul
83
+ 81.0,/m/0ch8v,"Livestock, farm animals, working animals"
84
+ 82.0,/m/03k3r,Horse
85
+ 83.0,/m/07rv9rh,Clip-clop
86
+ 84.0,/m/07q5rw0,"Neigh, whinny"
87
+ 85.0,/m/01xq0k1,"Cattle, bovinae"
88
+ 86.0,/m/07rpkh9,Moo
89
+ 87.0,/m/0239kh,Cowbell
90
+ 88.0,/m/068zj,Pig
91
+ 89.0,/t/dd00018,Oink
92
+ 90.0,/m/03fwl,Goat
93
+ 91.0,/m/07q0h5t,Bleat
94
+ 92.0,/m/07bgp,Sheep
95
+ 93.0,/m/025rv6n,Fowl
96
+ 94.0,/m/09b5t,"Chicken, rooster"
97
+ 95.0,/m/07st89h,Cluck
98
+ 96.0,/m/07qn5dc,"Crowing, cock-a-doodle-doo"
99
+ 97.0,/m/01rd7k,Turkey
100
+ 98.0,/m/07svc2k,Gobble
101
+ 99.0,/m/09ddx,Duck
102
+ 100.0,/m/07qdb04,Quack
103
+ 101.0,/m/0dbvp,Goose
104
+ 102.0,/m/07qwf61,Honk
105
+ 103.0,/m/01280g,Wild animals
106
+ 104.0,/m/0cdnk,"Roaring cats (lions, tigers)"
107
+ 105.0,/m/04cvmfc,Roar
108
+ 106.0,/m/015p6,Bird
109
+ 107.0,/m/020bb7,"Bird vocalization, bird call, bird song"
110
+ 108.0,/m/07pggtn,"Chirp, tweet"
111
+ 109.0,/m/07sx8x_,Squawk
112
+ 110.0,/m/0h0rv,"Pigeon, dove"
113
+ 111.0,/m/07r_25d,Coo
114
+ 112.0,/m/04s8yn,Crow
115
+ 113.0,/m/07r5c2p,Caw
116
+ 114.0,/m/09d5_,Owl
117
+ 115.0,/m/07r_80w,Hoot
118
+ 116.0,/m/05_wcq,"Bird flight, flapping wings"
119
+ 117.0,/m/01z5f,"Canidae, dogs, wolves"
120
+ 118.0,/m/06hps,"Rodents, rats, mice"
121
+ 119.0,/m/04rmv,Mouse
122
+ 120.0,/m/07r4gkf,Patter
123
+ 121.0,/m/03vt0,Insect
124
+ 122.0,/m/09xqv,Cricket
125
+ 123.0,/m/09f96,Mosquito
126
+ 124.0,/m/0h2mp,"Fly, housefly"
127
+ 125.0,/m/07pjwq1,Buzz
128
+ 126.0,/m/01h3n,"Bee, wasp, etc."
129
+ 127.0,/m/09ld4,Frog
130
+ 128.0,/m/07st88b,Croak
131
+ 129.0,/m/078jl,Snake
132
+ 130.0,/m/07qn4z3,Rattle
133
+ 131.0,/m/032n05,Whale vocalization
134
+ 132.0,/m/04rlf,Music
135
+ 133.0,/m/04szw,Musical instrument
136
+ 134.0,/m/0fx80y,Plucked string instrument
137
+ 135.0,/m/0342h,Guitar
138
+ 136.0,/m/02sgy,Electric guitar
139
+ 137.0,/m/018vs,Bass guitar
140
+ 138.0,/m/042v_gx,Acoustic guitar
141
+ 139.0,/m/06w87,"Steel guitar, slide guitar"
142
+ 140.0,/m/01glhc,Tapping (guitar technique)
143
+ 141.0,/m/07s0s5r,Strum
144
+ 142.0,/m/018j2,Banjo
145
+ 143.0,/m/0jtg0,Sitar
146
+ 144.0,/m/04rzd,Mandolin
147
+ 145.0,/m/01bns_,Zither
148
+ 146.0,/m/07xzm,Ukulele
149
+ 147.0,/m/05148p4,Keyboard (musical)
150
+ 148.0,/m/05r5c,Piano
151
+ 149.0,/m/01s0ps,Electric piano
152
+ 150.0,/m/013y1f,Organ
153
+ 151.0,/m/03xq_f,Electronic organ
154
+ 152.0,/m/03gvt,Hammond organ
155
+ 153.0,/m/0l14qv,Synthesizer
156
+ 154.0,/m/01v1d8,Sampler
157
+ 155.0,/m/03q5t,Harpsichord
158
+ 156.0,/m/0l14md,Percussion
159
+ 157.0,/m/02hnl,Drum kit
160
+ 158.0,/m/0cfdd,Drum machine
161
+ 159.0,/m/026t6,Drum
162
+ 160.0,/m/06rvn,Snare drum
163
+ 161.0,/m/03t3fj,Rimshot
164
+ 162.0,/m/02k_mr,Drum roll
165
+ 163.0,/m/0bm02,Bass drum
166
+ 164.0,/m/011k_j,Timpani
167
+ 165.0,/m/01p970,Tabla
168
+ 166.0,/m/01qbl,Cymbal
169
+ 167.0,/m/03qtq,Hi-hat
170
+ 168.0,/m/01sm1g,Wood block
171
+ 169.0,/m/07brj,Tambourine
172
+ 170.0,/m/05r5wn,Rattle (instrument)
173
+ 171.0,/m/0xzly,Maraca
174
+ 172.0,/m/0mbct,Gong
175
+ 173.0,/m/016622,Tubular bells
176
+ 174.0,/m/0j45pbj,Mallet percussion
177
+ 175.0,/m/0dwsp,"Marimba, xylophone"
178
+ 176.0,/m/0dwtp,Glockenspiel
179
+ 177.0,/m/0dwt5,Vibraphone
180
+ 178.0,/m/0l156b,Steelpan
181
+ 179.0,/m/05pd6,Orchestra
182
+ 180.0,/m/01kcd,Brass instrument
183
+ 181.0,/m/0319l,French horn
184
+ 182.0,/m/07gql,Trumpet
185
+ 183.0,/m/07c6l,Trombone
186
+ 184.0,/m/0l14_3,Bowed string instrument
187
+ 185.0,/m/02qmj0d,String section
188
+ 186.0,/m/07y_7,"Violin, fiddle"
189
+ 187.0,/m/0d8_n,Pizzicato
190
+ 188.0,/m/01xqw,Cello
191
+ 189.0,/m/02fsn,Double bass
192
+ 190.0,/m/085jw,"Wind instrument, woodwind instrument"
193
+ 191.0,/m/0l14j_,Flute
194
+ 192.0,/m/06ncr,Saxophone
195
+ 193.0,/m/01wy6,Clarinet
196
+ 194.0,/m/03m5k,Harp
197
+ 195.0,/m/0395lw,Bell
198
+ 196.0,/m/03w41f,Church bell
199
+ 197.0,/m/027m70_,Jingle bell
200
+ 198.0,/m/0gy1t2s,Bicycle bell
201
+ 199.0,/m/07n_g,Tuning fork
202
+ 200.0,/m/0f8s22,Chime
203
+ 201.0,/m/026fgl,Wind chime
204
+ 202.0,/m/0150b9,Change ringing (campanology)
205
+ 203.0,/m/03qjg,Harmonica
206
+ 204.0,/m/0mkg,Accordion
207
+ 205.0,/m/0192l,Bagpipes
208
+ 206.0,/m/02bxd,Didgeridoo
209
+ 207.0,/m/0l14l2,Shofar
210
+ 208.0,/m/07kc_,Theremin
211
+ 209.0,/m/0l14t7,Singing bowl
212
+ 210.0,/m/01hgjl,Scratching (performance technique)
213
+ 211.0,/m/064t9,Pop music
214
+ 212.0,/m/0glt670,Hip hop music
215
+ 213.0,/m/02cz_7,Beatboxing
216
+ 214.0,/m/06by7,Rock music
217
+ 215.0,/m/03lty,Heavy metal
218
+ 216.0,/m/05r6t,Punk rock
219
+ 217.0,/m/0dls3,Grunge
220
+ 218.0,/m/0dl5d,Progressive rock
221
+ 219.0,/m/07sbbz2,Rock and roll
222
+ 220.0,/m/05w3f,Psychedelic rock
223
+ 221.0,/m/06j6l,Rhythm and blues
224
+ 222.0,/m/0gywn,Soul music
225
+ 223.0,/m/06cqb,Reggae
226
+ 224.0,/m/01lyv,Country
227
+ 225.0,/m/015y_n,Swing music
228
+ 226.0,/m/0gg8l,Bluegrass
229
+ 227.0,/m/02x8m,Funk
230
+ 228.0,/m/02w4v,Folk music
231
+ 229.0,/m/06j64v,Middle Eastern music
232
+ 230.0,/m/03_d0,Jazz
233
+ 231.0,/m/026z9,Disco
234
+ 232.0,/m/0ggq0m,Classical music
235
+ 233.0,/m/05lls,Opera
236
+ 234.0,/m/02lkt,Electronic music
237
+ 235.0,/m/03mb9,House music
238
+ 236.0,/m/07gxw,Techno
239
+ 237.0,/m/07s72n,Dubstep
240
+ 238.0,/m/0283d,Drum and bass
241
+ 239.0,/m/0m0jc,Electronica
242
+ 240.0,/m/08cyft,Electronic dance music
243
+ 241.0,/m/0fd3y,Ambient music
244
+ 242.0,/m/07lnk,Trance music
245
+ 243.0,/m/0g293,Music of Latin America
246
+ 244.0,/m/0ln16,Salsa music
247
+ 245.0,/m/0326g,Flamenco
248
+ 246.0,/m/0155w,Blues
249
+ 247.0,/m/05fw6t,Music for children
250
+ 248.0,/m/02v2lh,New-age music
251
+ 249.0,/m/0y4f8,Vocal music
252
+ 250.0,/m/0z9c,A capella
253
+ 251.0,/m/0164x2,Music of Africa
254
+ 252.0,/m/0145m,Afrobeat
255
+ 253.0,/m/02mscn,Christian music
256
+ 254.0,/m/016cjb,Gospel music
257
+ 255.0,/m/028sqc,Music of Asia
258
+ 256.0,/m/015vgc,Carnatic music
259
+ 257.0,/m/0dq0md,Music of Bollywood
260
+ 258.0,/m/06rqw,Ska
261
+ 259.0,/m/02p0sh1,Traditional music
262
+ 260.0,/m/05rwpb,Independent music
263
+ 261.0,/m/074ft,Song
264
+ 262.0,/m/025td0t,Background music
265
+ 263.0,/m/02cjck,Theme music
266
+ 264.0,/m/03r5q_,Jingle (music)
267
+ 265.0,/m/0l14gg,Soundtrack music
268
+ 266.0,/m/07pkxdp,Lullaby
269
+ 267.0,/m/01z7dr,Video game music
270
+ 268.0,/m/0140xf,Christmas music
271
+ 269.0,/m/0ggx5q,Dance music
272
+ 270.0,/m/04wptg,Wedding music
273
+ 271.0,/t/dd00031,Happy music
274
+ 272.0,/t/dd00033,Sad music
275
+ 273.0,/t/dd00034,Tender music
276
+ 274.0,/t/dd00035,Exciting music
277
+ 275.0,/t/dd00036,Angry music
278
+ 276.0,/t/dd00037,Scary music
279
+ 277.0,/m/03m9d0z,Wind
280
+ 278.0,/m/09t49,Rustling leaves
281
+ 279.0,/t/dd00092,Wind noise (microphone)
282
+ 280.0,/m/0jb2l,Thunderstorm
283
+ 281.0,/m/0ngt1,Thunder
284
+ 282.0,/m/0838f,Water
285
+ 283.0,/m/06mb1,Rain
286
+ 284.0,/m/07r10fb,Raindrop
287
+ 285.0,/t/dd00038,Rain on surface
288
+ 286.0,/m/0j6m2,Stream
289
+ 287.0,/m/0j2kx,Waterfall
290
+ 288.0,/m/05kq4,Ocean
291
+ 289.0,/m/034srq,"Waves, surf"
292
+ 290.0,/m/06wzb,Steam
293
+ 291.0,/m/07swgks,Gurgling
294
+ 292.0,/m/02_41,Fire
295
+ 293.0,/m/07pzfmf,Crackle
296
+ 294.0,/m/07yv9,Vehicle
297
+ 295.0,/m/019jd,"Boat, Water vehicle"
298
+ 296.0,/m/0hsrw,"Sailboat, sailing ship"
299
+ 297.0,/m/056ks2,"Rowboat, canoe, kayak"
300
+ 298.0,/m/02rlv9,"Motorboat, speedboat"
301
+ 299.0,/m/06q74,Ship
302
+ 300.0,/m/012f08,Motor vehicle (road)
303
+ 301.0,/m/0k4j,Car
304
+ 302.0,/m/0912c9,"Vehicle horn, car horn, honking"
305
+ 303.0,/m/07qv_d5,Toot
306
+ 304.0,/m/02mfyn,Car alarm
307
+ 305.0,/m/04gxbd,"Power windows, electric windows"
308
+ 306.0,/m/07rknqz,Skidding
309
+ 307.0,/m/0h9mv,Tire squeal
310
+ 308.0,/t/dd00134,Car passing by
311
+ 309.0,/m/0ltv,"Race car, auto racing"
312
+ 310.0,/m/07r04,Truck
313
+ 311.0,/m/0gvgw0,Air brake
314
+ 312.0,/m/05x_td,"Air horn, truck horn"
315
+ 313.0,/m/02rhddq,Reversing beeps
316
+ 314.0,/m/03cl9h,"Ice cream truck, ice cream van"
317
+ 315.0,/m/01bjv,Bus
318
+ 316.0,/m/03j1ly,Emergency vehicle
319
+ 317.0,/m/04qvtq,Police car (siren)
320
+ 318.0,/m/012n7d,Ambulance (siren)
321
+ 319.0,/m/012ndj,"Fire engine, fire truck (siren)"
322
+ 320.0,/m/04_sv,Motorcycle
323
+ 321.0,/m/0btp2,"Traffic noise, roadway noise"
324
+ 322.0,/m/06d_3,Rail transport
325
+ 323.0,/m/07jdr,Train
326
+ 324.0,/m/04zmvq,Train whistle
327
+ 325.0,/m/0284vy3,Train horn
328
+ 326.0,/m/01g50p,"Railroad car, train wagon"
329
+ 327.0,/t/dd00048,Train wheels squealing
330
+ 328.0,/m/0195fx,"Subway, metro, underground"
331
+ 329.0,/m/0k5j,Aircraft
332
+ 330.0,/m/014yck,Aircraft engine
333
+ 331.0,/m/04229,Jet engine
334
+ 332.0,/m/02l6bg,"Propeller, airscrew"
335
+ 333.0,/m/09ct_,Helicopter
336
+ 334.0,/m/0cmf2,"Fixed-wing aircraft, airplane"
337
+ 335.0,/m/0199g,Bicycle
338
+ 336.0,/m/06_fw,Skateboard
339
+ 337.0,/m/02mk9,Engine
340
+ 338.0,/t/dd00065,Light engine (high frequency)
341
+ 339.0,/m/08j51y,"Dental drill, dentist's drill"
342
+ 340.0,/m/01yg9g,Lawn mower
343
+ 341.0,/m/01j4z9,Chainsaw
344
+ 342.0,/t/dd00066,Medium engine (mid frequency)
345
+ 343.0,/t/dd00067,Heavy engine (low frequency)
346
+ 344.0,/m/01h82_,Engine knocking
347
+ 345.0,/t/dd00130,Engine starting
348
+ 346.0,/m/07pb8fc,Idling
349
+ 347.0,/m/07q2z82,"Accelerating, revving, vroom"
350
+ 348.0,/m/02dgv,Door
351
+ 349.0,/m/03wwcy,Doorbell
352
+ 350.0,/m/07r67yg,Ding-dong
353
+ 351.0,/m/02y_763,Sliding door
354
+ 352.0,/m/07rjzl8,Slam
355
+ 353.0,/m/07r4wb8,Knock
356
+ 354.0,/m/07qcpgn,Tap
357
+ 355.0,/m/07q6cd_,Squeak
358
+ 356.0,/m/0642b4,Cupboard open or close
359
+ 357.0,/m/0fqfqc,Drawer open or close
360
+ 358.0,/m/04brg2,"Dishes, pots, and pans"
361
+ 359.0,/m/023pjk,"Cutlery, silverware"
362
+ 360.0,/m/07pn_8q,Chopping (food)
363
+ 361.0,/m/0dxrf,Frying (food)
364
+ 362.0,/m/0fx9l,Microwave oven
365
+ 363.0,/m/02pjr4,Blender
366
+ 364.0,/m/02jz0l,"Water tap, faucet"
367
+ 365.0,/m/0130jx,Sink (filling or washing)
368
+ 366.0,/m/03dnzn,Bathtub (filling or washing)
369
+ 367.0,/m/03wvsk,Hair dryer
370
+ 368.0,/m/01jt3m,Toilet flush
371
+ 369.0,/m/012xff,Toothbrush
372
+ 370.0,/m/04fgwm,Electric toothbrush
373
+ 371.0,/m/0d31p,Vacuum cleaner
374
+ 372.0,/m/01s0vc,Zipper (clothing)
375
+ 373.0,/m/03v3yw,Keys jangling
376
+ 374.0,/m/0242l,Coin (dropping)
377
+ 375.0,/m/01lsmm,Scissors
378
+ 376.0,/m/02g901,"Electric shaver, electric razor"
379
+ 377.0,/m/05rj2,Shuffling cards
380
+ 378.0,/m/0316dw,Typing
381
+ 379.0,/m/0c2wf,Typewriter
382
+ 380.0,/m/01m2v,Computer keyboard
383
+ 381.0,/m/081rb,Writing
384
+ 382.0,/m/07pp_mv,Alarm
385
+ 383.0,/m/07cx4,Telephone
386
+ 384.0,/m/07pp8cl,Telephone bell ringing
387
+ 385.0,/m/01hnzm,Ringtone
388
+ 386.0,/m/02c8p,"Telephone dialing, DTMF"
389
+ 387.0,/m/015jpf,Dial tone
390
+ 388.0,/m/01z47d,Busy signal
391
+ 389.0,/m/046dlr,Alarm clock
392
+ 390.0,/m/03kmc9,Siren
393
+ 391.0,/m/0dgbq,Civil defense siren
394
+ 392.0,/m/030rvx,Buzzer
395
+ 393.0,/m/01y3hg,"Smoke detector, smoke alarm"
396
+ 394.0,/m/0c3f7m,Fire alarm
397
+ 395.0,/m/04fq5q,Foghorn
398
+ 396.0,/m/0l156k,Whistle
399
+ 397.0,/m/06hck5,Steam whistle
400
+ 398.0,/t/dd00077,Mechanisms
401
+ 399.0,/m/02bm9n,"Ratchet, pawl"
402
+ 400.0,/m/01x3z,Clock
403
+ 401.0,/m/07qjznt,Tick
404
+ 402.0,/m/07qjznl,Tick-tock
405
+ 403.0,/m/0l7xg,Gears
406
+ 404.0,/m/05zc1,Pulleys
407
+ 405.0,/m/0llzx,Sewing machine
408
+ 406.0,/m/02x984l,Mechanical fan
409
+ 407.0,/m/025wky1,Air conditioning
410
+ 408.0,/m/024dl,Cash register
411
+ 409.0,/m/01m4t,Printer
412
+ 410.0,/m/0dv5r,Camera
413
+ 411.0,/m/07bjf,Single-lens reflex camera
414
+ 412.0,/m/07k1x,Tools
415
+ 413.0,/m/03l9g,Hammer
416
+ 414.0,/m/03p19w,Jackhammer
417
+ 415.0,/m/01b82r,Sawing
418
+ 416.0,/m/02p01q,Filing (rasp)
419
+ 417.0,/m/023vsd,Sanding
420
+ 418.0,/m/0_ksk,Power tool
421
+ 419.0,/m/01d380,Drill
422
+ 420.0,/m/014zdl,Explosion
423
+ 421.0,/m/032s66,"Gunshot, gunfire"
424
+ 422.0,/m/04zjc,Machine gun
425
+ 423.0,/m/02z32qm,Fusillade
426
+ 424.0,/m/0_1c,Artillery fire
427
+ 425.0,/m/073cg4,Cap gun
428
+ 426.0,/m/0g6b5,Fireworks
429
+ 427.0,/g/122z_qxw,Firecracker
430
+ 428.0,/m/07qsvvw,"Burst, pop"
431
+ 429.0,/m/07pxg6y,Eruption
432
+ 430.0,/m/07qqyl4,Boom
433
+ 431.0,/m/083vt,Wood
434
+ 432.0,/m/07pczhz,Chop
435
+ 433.0,/m/07pl1bw,Splinter
436
+ 434.0,/m/07qs1cx,Crack
437
+ 435.0,/m/039jq,Glass
438
+ 436.0,/m/07q7njn,"Chink, clink"
439
+ 437.0,/m/07rn7sz,Shatter
440
+ 438.0,/m/04k94,Liquid
441
+ 439.0,/m/07rrlb6,"Splash, splatter"
442
+ 440.0,/m/07p6mqd,Slosh
443
+ 441.0,/m/07qlwh6,Squish
444
+ 442.0,/m/07r5v4s,Drip
445
+ 443.0,/m/07prgkl,Pour
446
+ 444.0,/m/07pqc89,"Trickle, dribble"
447
+ 445.0,/t/dd00088,Gush
448
+ 446.0,/m/07p7b8y,Fill (with liquid)
449
+ 447.0,/m/07qlf79,Spray
450
+ 448.0,/m/07ptzwd,Pump (liquid)
451
+ 449.0,/m/07ptfmf,Stir
452
+ 450.0,/m/0dv3j,Boiling
453
+ 451.0,/m/0790c,Sonar
454
+ 452.0,/m/0dl83,Arrow
455
+ 453.0,/m/07rqsjt,"Whoosh, swoosh, swish"
456
+ 454.0,/m/07qnq_y,"Thump, thud"
457
+ 455.0,/m/07rrh0c,Thunk
458
+ 456.0,/m/0b_fwt,Electronic tuner
459
+ 457.0,/m/02rr_,Effects unit
460
+ 458.0,/m/07m2kt,Chorus effect
461
+ 459.0,/m/018w8,Basketball bounce
462
+ 460.0,/m/07pws3f,Bang
463
+ 461.0,/m/07ryjzk,"Slap, smack"
464
+ 462.0,/m/07rdhzs,"Whack, thwack"
465
+ 463.0,/m/07pjjrj,"Smash, crash"
466
+ 464.0,/m/07pc8lb,Breaking
467
+ 465.0,/m/07pqn27,Bouncing
468
+ 466.0,/m/07rbp7_,Whip
469
+ 467.0,/m/07pyf11,Flap
470
+ 468.0,/m/07qb_dv,Scratch
471
+ 469.0,/m/07qv4k0,Scrape
472
+ 470.0,/m/07pdjhy,Rub
473
+ 471.0,/m/07s8j8t,Roll
474
+ 472.0,/m/07plct2,Crushing
475
+ 473.0,/t/dd00112,"Crumpling, crinkling"
476
+ 474.0,/m/07qcx4z,Tearing
477
+ 475.0,/m/02fs_r,"Beep, bleep"
478
+ 476.0,/m/07qwdck,Ping
479
+ 477.0,/m/07phxs1,Ding
480
+ 478.0,/m/07rv4dm,Clang
481
+ 479.0,/m/07s02z0,Squeal
482
+ 480.0,/m/07qh7jl,Creak
483
+ 481.0,/m/07qwyj0,Rustle
484
+ 482.0,/m/07s34ls,Whir
485
+ 483.0,/m/07qmpdm,Clatter
486
+ 484.0,/m/07p9k1k,Sizzle
487
+ 485.0,/m/07qc9xj,Clicking
488
+ 486.0,/m/07rwm0c,Clickety-clack
489
+ 487.0,/m/07phhsh,Rumble
490
+ 488.0,/m/07qyrcz,Plop
491
+ 489.0,/m/07qfgpx,"Jingle, tinkle"
492
+ 490.0,/m/07rcgpl,Hum
493
+ 491.0,/m/07p78v5,Zing
494
+ 492.0,/t/dd00121,Boing
495
+ 493.0,/m/07s12q4,Crunch
496
+ 494.0,/m/028v0c,Silence
497
+ 495.0,/m/01v_m0,Sine wave
498
+ 496.0,/m/0b9m1,Harmonic
499
+ 497.0,/m/0hdsk,Chirp tone
500
+ 498.0,/m/0c1dj,Sound effect
501
+ 499.0,/m/07pt_g0,Pulse
502
+ 500.0,/t/dd00125,"Inside, small room"
503
+ 501.0,/t/dd00126,"Inside, large room or hall"
504
+ 502.0,/t/dd00127,"Inside, public space"
505
+ 503.0,/t/dd00128,"Outside, urban or manmade"
506
+ 504.0,/t/dd00129,"Outside, rural or natural"
507
+ 505.0,/m/01b9nn,Reverberation
508
+ 506.0,/m/01jnbd,Echo
509
+ 507.0,/m/096m7z,Noise
510
+ 508.0,/m/06_y0by,Environmental noise
511
+ 509.0,/m/07rgkc5,Static
512
+ 510.0,/m/06xkwv,Mains hum
513
+ 511.0,/m/0g12c5,Distortion
514
+ 512.0,/m/08p9q4,Sidetone
515
+ 513.0,/m/07szfh9,Cacophony
516
+ 514.0,/m/0chx_,White noise
517
+ 515.0,/m/0cj0r,Pink noise
518
+ 516.0,/m/07p_0gm,Throbbing
519
+ 517.0,/m/01jwx6,Vibration
520
+ 518.0,/m/07c52,Television
521
+ 519.0,/m/06bz3,Radio
522
+ 520.0,/m/07hvw1,Field recording
523
+ ,,queen not present
524
+ ,,queen present and rejected
525
+ ,,queen present and newly accepted
526
+ ,,queen present or original queen
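The four unindexed rows appended at the end carry the custom queen-status labels; yamnet.class_names() above keeps only the display_name column, so they parse like any other row. A quick sanity check (illustrative only):

    import yamnet

    # Illustrative check against the class map added in this commit.
    names = yamnet.class_names('yamnet/yamnet_class_map.csv')
    print(len(names))    # 525: the 521 AudioSet classes plus the 4 appended queen-status labels
    print(names[-4:])    # the appended labels, ending with 'queen present or original queen'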
yamnet_test.py ADDED
@@ -0,0 +1,56 @@
1
+ """Installation test for YAMNet."""
2
+
3
+ import numpy as np
4
+ import tensorflow as tf
5
+
6
+ import params
7
+ import yamnet
8
+
9
+
10
+ class YAMNetTest(tf.test.TestCase):
11
+ _params = None
12
+ _yamnet = None
13
+ _yamnet_classes = None
14
+
15
+ @classmethod
16
+ def setUpClass(cls):
17
+ super().setUpClass()
18
+ cls._params = params.Params()
19
+ cls._yamnet = yamnet.yamnet_frames_model(cls._params)
20
+ cls._yamnet.load_weights('yamnet/yamnet.h5')
21
+ cls._yamnet_classes = yamnet.class_names('yamnet/yamnet_class_map.csv')
22
+
23
+ def clip_test(self, waveform, expected_class_name, top_n=10):
24
+ """Run the model on the waveform, check that expected class is in top-n."""
25
+ predictions, _, _ = YAMNetTest._yamnet(waveform)
26
+ clip_predictions = np.mean(predictions, axis=0)
27
+ top_n_indices = np.argsort(clip_predictions)[-top_n:]
28
+ top_n_scores = clip_predictions[top_n_indices]
29
+ top_n_class_names = YAMNetTest._yamnet_classes[top_n_indices]
30
+ top_n_predictions = list(zip(top_n_class_names, top_n_scores))
31
+ self.assertIn(expected_class_name, top_n_class_names,
32
+ 'Did not find expected class {} in top {} predictions: {}'.format(
33
+ expected_class_name, top_n, top_n_predictions))
34
+
35
+ def testZeros(self):
36
+ self.clip_test(
37
+ waveform=np.zeros((int(3 * YAMNetTest._params.sample_rate),)),
38
+ expected_class_name='Silence')
39
+
40
+ def testRandom(self):
41
+ # Create a numpy random Generator with a fixed seed for repeatability
42
+ rng = np.random.default_rng(51773)
43
+ self.clip_test(
44
+ waveform=rng.uniform(-1.0, +1.0,
45
+ (int(3 * YAMNetTest._params.sample_rate),)),
46
+ expected_class_name='White noise')
47
+
48
+ def testSine(self):
49
+ self.clip_test(
50
+ waveform=np.sin(2 * np.pi * 440 *
51
+ np.arange(0, 3, 1 / YAMNetTest._params.sample_rate)),
52
+ expected_class_name='Sine wave')
53
+
54
+
55
+ if __name__ == '__main__':
56
+ tf.test.main()