grider-transwithai committed
Commit 6963d43 · verified · 1 parent: 3e96984

Upload model and requirements files

Files changed (4)
  1. inference.py +690 -0
  2. model.onnx +3 -0
  3. model_metadata.json +21 -0
  4. requirements.txt +5 -0
inference.py ADDED
@@ -0,0 +1,690 @@
#!/usr/bin/env python
"""ONNX inference script for encoder_only_decoder VAD model - Silero-style implementation.

This implementation follows Silero VAD's architecture for cleaner, more efficient processing:
- Fixed-size chunk processing for consistent behavior
- State management for streaming capability
- Hysteresis-based speech detection (dual threshold)
- Simplified segment extraction with proper padding
"""

import argparse
import json
import os
import warnings
from typing import Callable, Dict, List, Optional

import librosa
import numpy as np
from transformers import WhisperFeatureExtractor

try:
    # torch is optional (it is not pinned in requirements.txt); it is only
    # used to recognize torch.Tensor inputs in get_speech_timestamps
    import torch
except ImportError:
    torch = None


class WhisperVADOnnxWrapper:
    """ONNX wrapper for Whisper-based VAD model following Silero's architecture."""

    def __init__(
        self,
        model_path: str,
        metadata_path: Optional[str] = None,
        force_cpu: bool = False,
        num_threads: int = 1,
    ):
        """Initialize ONNX model wrapper.

        Args:
            model_path: Path to ONNX model file
            metadata_path: Path to metadata JSON file (optional)
            force_cpu: Force CPU execution even if GPU is available
            num_threads: Number of CPU threads for inference
        """
        try:
            import onnxruntime as ort
        except ImportError:
            raise ImportError(
                "onnxruntime not installed. Install with:\n"
                "  pip install onnxruntime      # For CPU\n"
                "  pip install onnxruntime-gpu  # For GPU"
            )

        self.model_path = model_path

        # Load metadata
        if metadata_path is None:
            metadata_path = model_path.replace('.onnx', '_metadata.json')

        if os.path.exists(metadata_path):
            with open(metadata_path, 'r') as f:
                self.metadata = json.load(f)
        else:
            warnings.warn("No metadata file found. Using default values.")
            self.metadata = {
                'whisper_model_name': 'openai/whisper-base',
                'frame_duration_ms': 20,
                'total_duration_ms': 30000,
            }

        # Initialize feature extractor
        self.feature_extractor = WhisperFeatureExtractor.from_pretrained(
            self.metadata['whisper_model_name']
        )

        # Set up ONNX Runtime session
        opts = ort.SessionOptions()
        opts.inter_op_num_threads = num_threads
        opts.intra_op_num_threads = num_threads

        providers = ['CPUExecutionProvider']
        if not force_cpu and 'CUDAExecutionProvider' in ort.get_available_providers():
            providers.insert(0, 'CUDAExecutionProvider')

        self.session = ort.InferenceSession(model_path, providers=providers, sess_options=opts)

        # Get input/output info
        self.input_name = self.session.get_inputs()[0].name
        self.output_names = [out.name for out in self.session.get_outputs()]

        # Model parameters
        self.sample_rate = 16000  # Whisper uses 16kHz
        self.frame_duration_ms = self.metadata.get('frame_duration_ms', 20)
        self.chunk_duration_ms = self.metadata.get('total_duration_ms', 30000)
        self.chunk_samples = int(self.chunk_duration_ms * self.sample_rate / 1000)
        self.frames_per_chunk = int(self.chunk_duration_ms / self.frame_duration_ms)

        # Initialize state
        self.reset_states()

        print(f"Model loaded: {model_path}")
        print(f"  Providers: {providers}")
        print(f"  Chunk duration: {self.chunk_duration_ms}ms")
        print(f"  Frame duration: {self.frame_duration_ms}ms")

    def reset_states(self):
        """Reset internal states for new audio stream."""
        self._context = None
        self._last_chunk = None

    def _validate_input(self, audio: np.ndarray, sr: int) -> np.ndarray:
        """Validate and preprocess input audio.

        Args:
            audio: Input audio array
            sr: Sample rate

        Returns:
            Preprocessed audio at 16kHz
        """
        if audio.ndim > 1:
            # Convert to mono if multi-channel
            audio = audio.mean(axis=0 if audio.shape[0] > audio.shape[1] else 1)

        # Resample if needed (librosa is imported at module level)
        if sr != self.sample_rate:
            audio = librosa.resample(audio, orig_sr=sr, target_sr=self.sample_rate)

        return audio

    def __call__(self, audio_chunk: np.ndarray, sr: int = 16000) -> np.ndarray:
        """Process a single audio chunk.

        Args:
            audio_chunk: Audio chunk to process
            sr: Sample rate

        Returns:
            Frame-level speech probabilities
        """
        # Validate input
        audio_chunk = self._validate_input(audio_chunk, sr)

        # Ensure chunk is correct size
        if len(audio_chunk) < self.chunk_samples:
            audio_chunk = np.pad(
                audio_chunk,
                (0, self.chunk_samples - len(audio_chunk)),
                mode='constant'
            )
        elif len(audio_chunk) > self.chunk_samples:
            audio_chunk = audio_chunk[:self.chunk_samples]

        # Extract features
        inputs = self.feature_extractor(
            audio_chunk,
            sampling_rate=self.sample_rate,
            return_tensors="np"
        )

        # Run inference
        outputs = self.session.run(
            self.output_names,
            {self.input_name: inputs.input_features}
        )

        # Apply sigmoid to get probabilities
        frame_logits = outputs[0][0]  # Remove batch dimension
        frame_probs = 1 / (1 + np.exp(-frame_logits))

        return frame_probs

    def audio_forward(self, audio: np.ndarray, sr: int = 16000) -> np.ndarray:
        """Process full audio file in chunks (Silero-style).

        Args:
            audio: Full audio array
            sr: Sample rate

        Returns:
            Concatenated frame probabilities for entire audio
        """
        audio = self._validate_input(audio, sr)
        self.reset_states()

        all_probs = []

        # Process in chunks
        for i in range(0, len(audio), self.chunk_samples):
            chunk = audio[i:i + self.chunk_samples]

            # Pad last chunk if needed
            if len(chunk) < self.chunk_samples:
                chunk = np.pad(chunk, (0, self.chunk_samples - len(chunk)), mode='constant')

            # Get predictions for chunk
            chunk_probs = self.__call__(chunk, self.sample_rate)
            all_probs.append(chunk_probs)

        # Concatenate all probabilities
        if all_probs:
            return np.concatenate(all_probs)
        return np.array([])


def get_speech_timestamps(
    audio: np.ndarray,
    model,
    threshold: float = 0.5,
    sampling_rate: int = 16000,
    min_speech_duration_ms: int = 250,
    max_speech_duration_s: float = float('inf'),
    min_silence_duration_ms: int = 100,
    speech_pad_ms: int = 30,
    return_seconds: bool = False,
    neg_threshold: Optional[float] = None,
    progress_tracking_callback: Optional[Callable[[float], None]] = None,
) -> List[Dict[str, float]]:
    """Extract speech timestamps from audio using Silero-style processing.

    This function implements Silero VAD's approach with:
    - Dual threshold (positive and negative) for hysteresis
    - Proper segment padding
    - Minimum duration filtering
    - Maximum duration handling (segments are cut at the limit)

    Args:
        audio: Input audio array
        model: VAD model (WhisperVADOnnxWrapper instance)
        threshold: Speech threshold (default: 0.5)
        sampling_rate: Audio sample rate
        min_speech_duration_ms: Minimum speech segment duration
        max_speech_duration_s: Maximum speech segment duration
        min_silence_duration_ms: Minimum silence to split segments
        speech_pad_ms: Padding to add to speech segments
        return_seconds: Return times in seconds vs samples
        neg_threshold: Negative threshold for hysteresis (default: threshold - 0.15)
        progress_tracking_callback: Progress callback function

    Returns:
        List of speech segments with start/end times
    """
    # Convert to numpy if torch tensor (torch is an optional dependency)
    if torch is not None and torch.is_tensor(audio):
        audio = audio.numpy()

    # Validate audio
    if audio.ndim > 1:
        audio = audio.mean(axis=0 if audio.shape[0] > audio.shape[1] else 1)

    # Get frame probabilities for entire audio
    model.reset_states()
    speech_probs = model.audio_forward(audio, sampling_rate)

    # Calculate frame parameters
    frame_duration_ms = model.frame_duration_ms
    frame_samples = int(sampling_rate * frame_duration_ms / 1000)

    # Convert durations to frames
    min_speech_frames = int(min_speech_duration_ms / frame_duration_ms)
    min_silence_frames = int(min_silence_duration_ms / frame_duration_ms)
    speech_pad_frames = int(speech_pad_ms / frame_duration_ms)
    max_speech_frames = (
        int(max_speech_duration_s * 1000 / frame_duration_ms)
        if max_speech_duration_s != float('inf')
        else len(speech_probs)
    )

    # Set negative threshold for hysteresis
    if neg_threshold is None:
        neg_threshold = max(threshold - 0.15, 0.01)

    # Track speech segments
    triggered = False
    speeches = []
    current_speech = {}
    current_probs = []  # Track probabilities for current segment
    temp_end = 0

    # Process each frame
    for i, speech_prob in enumerate(speech_probs):
        # Report progress
        if progress_tracking_callback:
            progress = (i + 1) / len(speech_probs) * 100
            progress_tracking_callback(progress)

        # Track probabilities for current segment
        if triggered:
            current_probs.append(float(speech_prob))

        # Speech onset detection
        if speech_prob >= threshold and not triggered:
            triggered = True
            current_speech['start'] = i
            current_probs = [float(speech_prob)]  # Start tracking probabilities
            continue

        # Check for maximum speech duration
        if triggered and 'start' in current_speech:
            duration = i - current_speech['start']
            if duration > max_speech_frames:
                # Force end segment at max duration
                current_speech['end'] = current_speech['start'] + max_speech_frames
                # Calculate probability statistics for segment
                # (cast to float so segments stay JSON-serializable)
                if current_probs:
                    current_speech['avg_prob'] = float(np.mean(current_probs))
                    current_speech['min_prob'] = float(np.min(current_probs))
                    current_speech['max_prob'] = float(np.max(current_probs))
                speeches.append(current_speech)
                current_speech = {}
                current_probs = []
                triggered = False
                temp_end = 0
                continue

        # Speech offset detection with hysteresis
        if speech_prob < neg_threshold and triggered:
            if not temp_end:
                temp_end = i

            # Check if silence is long enough
            if i - temp_end >= min_silence_frames:
                # End current speech segment
                current_speech['end'] = temp_end

                # Check minimum duration
                if current_speech['end'] - current_speech['start'] >= min_speech_frames:
                    # Calculate probability statistics over the speech frames only
                    # (cast to float so segments stay JSON-serializable)
                    if current_probs:
                        n_speech = temp_end - current_speech['start']
                        current_speech['avg_prob'] = float(np.mean(current_probs[:n_speech]))
                        current_speech['min_prob'] = float(np.min(current_probs[:n_speech]))
                        current_speech['max_prob'] = float(np.max(current_probs[:n_speech]))
                    speeches.append(current_speech)

                current_speech = {}
                current_probs = []
                triggered = False
                temp_end = 0

        # Reset temp_end if speech resumes
        elif speech_prob >= threshold and temp_end:
            temp_end = 0

    # Handle speech that continues to the end
    if triggered and 'start' in current_speech:
        current_speech['end'] = len(speech_probs)
        if current_speech['end'] - current_speech['start'] >= min_speech_frames:
            # Calculate probability statistics for segment
            if current_probs:
                current_speech['avg_prob'] = float(np.mean(current_probs))
                current_speech['min_prob'] = float(np.min(current_probs))
                current_speech['max_prob'] = float(np.max(current_probs))
            speeches.append(current_speech)

    # Apply padding to segments
    for i, speech in enumerate(speeches):
        # Add padding
        if i == 0:
            speech['start'] = max(0, speech['start'] - speech_pad_frames)
        else:
            speech['start'] = max(speeches[i-1]['end'], speech['start'] - speech_pad_frames)

        if i < len(speeches) - 1:
            speech['end'] = min(speeches[i+1]['start'], speech['end'] + speech_pad_frames)
        else:
            speech['end'] = min(len(speech_probs), speech['end'] + speech_pad_frames)

    # Convert to time units
    if return_seconds:
        for speech in speeches:
            speech['start'] = speech['start'] * frame_duration_ms / 1000
            speech['end'] = speech['end'] * frame_duration_ms / 1000
    else:
        # Convert frames to samples
        for speech in speeches:
            speech['start'] = speech['start'] * frame_samples
            speech['end'] = speech['end'] * frame_samples

    return speeches


class VADIterator:
    """Stream iterator for real-time VAD processing (Silero-style)."""

    def __init__(
        self,
        model,
        threshold: float = 0.5,
        sampling_rate: int = 16000,
        min_silence_duration_ms: int = 100,
        speech_pad_ms: int = 30,
    ):
        """Initialize VAD iterator for streaming.

        Args:
            model: WhisperVADOnnxWrapper instance
            threshold: Speech detection threshold
            sampling_rate: Audio sample rate
            min_silence_duration_ms: Minimum silence duration
            speech_pad_ms: Speech padding in milliseconds
        """
        self.model = model
        self.threshold = threshold
        self.neg_threshold = max(threshold - 0.15, 0.01)
        self.sampling_rate = sampling_rate

        # Calculate frame-based parameters
        self.frame_duration_ms = model.frame_duration_ms
        self.min_silence_frames = min_silence_duration_ms / self.frame_duration_ms
        self.speech_pad_frames = speech_pad_ms / self.frame_duration_ms

        self.reset_states()

    def reset_states(self):
        """Reset iterator state."""
        self.model.reset_states()
        self.triggered = False
        self.temp_end = 0
        self.current_frame = 0
        self.buffer = np.array([])
        self.speech_start = 0

    def __call__(self, audio_chunk: np.ndarray, return_seconds: bool = False) -> Optional[Dict]:
        """Process audio chunk and detect speech boundaries.

        Args:
            audio_chunk: Audio chunk to process
            return_seconds: Return times in seconds vs samples

        Returns:
            Dict with 'start' or 'end' key when a speech boundary is detected,
            otherwise None. Times are relative to the start of the stream.
        """
        # Add to buffer
        self.buffer = np.concatenate([self.buffer, audio_chunk]) if len(self.buffer) > 0 else audio_chunk

        # Check if we have enough samples for a full chunk
        if len(self.buffer) < self.model.chunk_samples:
            return None

        # Process full chunk
        chunk = self.buffer[:self.model.chunk_samples]
        self.buffer = self.buffer[self.model.chunk_samples:]

        # Get frame predictions
        frame_probs = self.model(chunk, self.sampling_rate)

        # Process each frame
        for prob in frame_probs:
            self.current_frame += 1

            # Speech onset
            if prob >= self.threshold and not self.triggered:
                self.triggered = True
                self.speech_start = self.current_frame - self.speech_pad_frames
                # Convert frame index to seconds or samples (16 samples per ms at 16 kHz)
                if return_seconds:
                    start_time = max(0, self.speech_start * self.frame_duration_ms / 1000)
                else:
                    start_time = max(0, self.speech_start * self.frame_duration_ms * 16)
                return {'start': start_time}

            # Speech offset
            if prob < self.neg_threshold and self.triggered:
                if not self.temp_end:
                    self.temp_end = self.current_frame
                elif self.current_frame - self.temp_end >= self.min_silence_frames:
                    # End speech
                    end_frame = self.temp_end + self.speech_pad_frames
                    if return_seconds:
                        end_time = end_frame * self.frame_duration_ms / 1000
                    else:
                        end_time = end_frame * self.frame_duration_ms * 16
                    self.triggered = False
                    self.temp_end = 0
                    return {'end': end_time}
            elif prob >= self.threshold and self.temp_end:
                self.temp_end = 0

        return None


def load_audio(audio_path: str, sampling_rate: int = 16000) -> np.ndarray:
    """Load audio file and convert to target sample rate.

    Args:
        audio_path: Path to audio file
        sampling_rate: Target sample rate

    Returns:
        Audio array at target sample rate
    """
    audio, _ = librosa.load(audio_path, sr=sampling_rate)
    return audio


def save_segments(segments: List[Dict], output_path: str, format: str = 'json'):
    """Save speech segments to file.

    Args:
        segments: List of speech segments
        output_path: Output file path
        format: Output format (json, txt, csv, srt)
    """
    if format == 'json':
        with open(output_path, 'w') as f:
            json.dump({'segments': segments}, f, indent=2)

    elif format == 'txt':
        with open(output_path, 'w') as f:
            for i, seg in enumerate(segments, 1):
                start = seg['start']
                end = seg['end']
                duration = end - start
                f.write(f"{i:3d}. {start:8.3f}s - {end:8.3f}s (duration: {duration:6.3f}s)\n")

    elif format == 'csv':
        import csv
        with open(output_path, 'w', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=['start', 'end', 'duration'])
            writer.writeheader()
            for seg in segments:
                row = {
                    'start': seg['start'],
                    'end': seg['end'],
                    'duration': seg['end'] - seg['start']
                }
                writer.writerow(row)

    elif format == 'srt':
        # Convert seconds to SRT timestamp format (defined once, not per segment)
        def seconds_to_srt(seconds):
            hours = int(seconds // 3600)
            minutes = int((seconds % 3600) // 60)
            secs = int(seconds % 60)
            millis = int((seconds % 1) * 1000)
            return f"{hours:02d}:{minutes:02d}:{secs:02d},{millis:03d}"

        with open(output_path, 'w') as f:
            for i, seg in enumerate(segments, 1):
                start_s = seg['start']
                end_s = seg['end']

                f.write(f"{i}\n")
                f.write(f"{seconds_to_srt(start_s)} --> {seconds_to_srt(end_s)}\n")

                # Write speech probability information if available
                if 'avg_prob' in seg:
                    f.write(f"Speech [Avg: {seg['avg_prob']:.2%}, Min: {seg['min_prob']:.2%}, Max: {seg['max_prob']:.2%}]\n\n")
                else:
                    f.write("[Speech]\n\n")


def main():
    parser = argparse.ArgumentParser(
        description='Silero-style ONNX inference for Whisper-based VAD model'
    )
    parser.add_argument('--model', required=True, help='Path to ONNX model file')
    parser.add_argument('--audio', required=True, help='Path to audio file')
    parser.add_argument('--output', help='Output file path (default: audio_path.vad.json)')
    parser.add_argument('--format', choices=['json', 'txt', 'csv', 'srt'],
                        default='json', help='Output format')
    parser.add_argument('--threshold', type=float, default=0.5,
                        help='Speech detection threshold (0.0-1.0)')
    parser.add_argument('--neg-threshold', type=float, default=None,
                        help='Negative threshold for hysteresis (default: threshold - 0.15)')
    parser.add_argument('--min-speech-duration', type=int, default=250,
                        help='Minimum speech duration in ms')
    parser.add_argument('--min-silence-duration', type=int, default=100,
                        help='Minimum silence duration in ms')
    parser.add_argument('--speech-pad', type=int, default=30,
                        help='Speech padding in ms')
    parser.add_argument('--max-speech-duration', type=float, default=float('inf'),
                        help='Maximum speech duration in seconds')
    parser.add_argument('--metadata', help='Path to metadata JSON file')
    parser.add_argument('--force-cpu', action='store_true',
                        help='Force CPU execution even if GPU is available')
    parser.add_argument('--threads', type=int, default=1,
                        help='Number of CPU threads')
    parser.add_argument('--stream', action='store_true',
                        help='Use streaming mode (demonstrate VADIterator)')

    args = parser.parse_args()

    # Check files exist
    if not os.path.exists(args.model):
        print(f"Error: Model file not found: {args.model}")
        return 1

    if not os.path.exists(args.audio):
        print(f"Error: Audio file not found: {args.audio}")
        return 1

    try:
        # Initialize model
        print("Loading model...")
        model = WhisperVADOnnxWrapper(
            model_path=args.model,
            metadata_path=args.metadata,
            force_cpu=args.force_cpu,
            num_threads=args.threads,
        )

        # Load audio
        print(f"Loading audio: {args.audio}")
        audio = load_audio(args.audio)
        duration = len(audio) / 16000
        print(f"Audio duration: {duration:.2f}s")

        if args.stream:
            # Demonstrate streaming mode
            print("\nUsing streaming mode (VADIterator)...")
            vad_iterator = VADIterator(
                model=model,
                threshold=args.threshold,
                min_silence_duration_ms=args.min_silence_duration,
                speech_pad_ms=args.speech_pad,
            )

            # Simulate streaming by processing in small chunks
            chunk_size = 16000  # 1 second chunks
            segments = []
            current_segment = {}

            for i in range(0, len(audio), chunk_size):
                chunk = audio[i:i + chunk_size]
                result = vad_iterator(chunk, return_seconds=True)

                if result:
                    # VADIterator tracks its absolute position in the stream,
                    # so returned times are already relative to the stream start
                    if 'start' in result:
                        current_segment = {'start': result['start']}
                        print(f"  Speech started: {current_segment['start']:.2f}s")
                    elif 'end' in result and current_segment:
                        current_segment['end'] = result['end']
                        segments.append(current_segment)
                        print(f"  Speech ended: {current_segment['end']:.2f}s")
                        current_segment = {}

            # Handle ongoing speech at end
            if current_segment and 'start' in current_segment:
                current_segment['end'] = duration
                segments.append(current_segment)
        else:
            # Use batch mode with Silero-style processing
            print("\nProcessing with Silero-style speech detection...")

            # Progress callback
            def progress_callback(percent):
                print(f"\rProgress: {percent:.1f}%", end='', flush=True)

            # Get speech timestamps
            segments = get_speech_timestamps(
                audio=audio,
                model=model,
                threshold=args.threshold,
                sampling_rate=16000,
                min_speech_duration_ms=args.min_speech_duration,
                min_silence_duration_ms=args.min_silence_duration,
                speech_pad_ms=args.speech_pad,
                max_speech_duration_s=args.max_speech_duration,
                return_seconds=True,
                neg_threshold=args.neg_threshold,
                progress_tracking_callback=progress_callback,
            )
            print()  # New line after progress

        # Display results
        print(f"\nFound {len(segments)} speech segments:")
        total_speech = sum(seg['end'] - seg['start'] for seg in segments)
        print(f"Total speech: {total_speech:.2f}s ({total_speech/duration*100:.1f}%)")

        if segments:
            print("\nSegments:")
            for i, seg in enumerate(segments[:10], 1):  # Show first 10
                duration_seg = seg['end'] - seg['start']
                print(f"  {i:2d}. {seg['start']:7.3f}s - {seg['end']:7.3f}s (duration: {duration_seg:5.3f}s)")
            if len(segments) > 10:
                print(f"  ... and {len(segments) - 10} more segments")

        # Save results
        output_path = args.output
        if not output_path:
            base = os.path.splitext(args.audio)[0]
            output_path = f"{base}.vad.{args.format}"

        save_segments(segments, output_path, format=args.format)
        print(f"\nResults saved to: {output_path}")

    except Exception as e:
        print(f"Error: {e}")
        import traceback
        traceback.print_exc()
        return 1

    return 0


if __name__ == '__main__':
    raise SystemExit(main())
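For quick validation the script can be run from the command line, e.g. python inference.py --model model.onnx --audio speech.wav --format srt, or driven from Python. Below is a minimal sketch of the Python route; the audio file name is illustrative, and it assumes model.onnx and model_metadata.json sit side by side as uploaded in this commit:

# Minimal usage sketch (speech.wav is a placeholder for any local audio file)
from inference import (
    WhisperVADOnnxWrapper,
    get_speech_timestamps,
    load_audio,
    save_segments,
)

model = WhisperVADOnnxWrapper(model_path="model.onnx")  # auto-finds model_metadata.json
audio = load_audio("speech.wav")                        # loaded and resampled to 16 kHz
segments = get_speech_timestamps(
    audio, model,
    threshold=0.5,        # onset threshold; offset defaults to threshold - 0.15
    return_seconds=True,
)
save_segments(segments, "speech.vad.srt", format="srt")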
model.onnx ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cd47513515766d57f740e3094440dbbca9ab87e026b9cf21540d7ad588c0e047
size 119137398
model_metadata.json ADDED
@@ -0,0 +1,21 @@
{
  "model_type": "encoder_decoder",
  "whisper_model_name": "openai/whisper-base",
  "decoder_layers": 2,
  "decoder_heads": 8,
  "input_shape": [
    1,
    80,
    3000
  ],
  "output_shape": [
    1,
    1500
  ],
  "frame_duration_ms": 20,
  "total_duration_ms": 30000,
  "opset_version": 17,
  "export_batch_size": 1,
  "config_path": "",
  "checkpoint_path": ""
}
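The shapes above are mutually consistent: a 30000 ms chunk at a 20 ms frame step gives 30000 / 20 = 1500 output frames, matching output_shape [1, 1500], while input_shape [1, 80, 3000] is Whisper's standard log-mel input (80 mel bins at a 10 ms hop) for 30 s of audio. A small sanity-check sketch against this file:

# Verify the frame grid declared in model_metadata.json
import json

with open("model_metadata.json") as f:
    meta = json.load(f)

frames = meta["total_duration_ms"] // meta["frame_duration_ms"]  # 30000 // 20 = 1500
assert frames == meta["output_shape"][-1], "frame grid and output shape disagree"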
requirements.txt ADDED
@@ -0,0 +1,5 @@
onnxruntime>=1.16.0    # or onnxruntime-gpu for GPU support
transformers>=4.30.0   # For WhisperFeatureExtractor
librosa>=0.10.0        # Audio processing
soundfile>=0.12.0      # Audio I/O (required by librosa)
numpy>=1.24.0          # Array operations
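Note that torch is not pinned here: with the optional-import guard in inference.py, the script runs with just these five packages, and torch is only needed if you pass torch.Tensor audio to get_speech_timestamps. A quick sketch to confirm the pinned packages are importable before running inference:

# Environment check sketch: verify the five pinned packages resolve
import importlib.util

for pkg in ("onnxruntime", "transformers", "librosa", "soundfile", "numpy"):
    if importlib.util.find_spec(pkg) is None:
        raise SystemExit(f"missing dependency: {pkg}")
print("all requirements importable")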