mazesmazes committed
Commit 6fbb3b5 · verified · 1 Parent(s): 08c708e

Update custom model files, README, and requirements

Files changed (1):
  asr_pipeline.py (+1 -222)
asr_pipeline.py CHANGED
@@ -30,12 +30,6 @@ class ForcedAligner:
     _model = None
     _labels = None
     _dictionary = None
-    _vad_model = None
-
-    # VAD parameters
-    VAD_HOP_SIZE = 256  # TEN-VAD frame size (16 ms at 16 kHz)
-    VAD_THRESHOLD = 0.5  # Speech detection threshold
-    VAD_MAX_GAP = 0.15  # Max gap to merge speech segments (seconds)
 
     @classmethod
     def get_instance(cls, device: str = "cuda"):
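
Note on the constants removed above: TEN-VAD consumes fixed-size frames, so the hop size sets the time resolution of every speech/non-speech decision the VAD path produced. A minimal sketch of the arithmetic (plain Python; the names mirror the deleted class attributes):

    SAMPLE_RATE = 16000  # Hz, the rate assumed throughout this file
    VAD_HOP_SIZE = 256   # samples per TEN-VAD frame

    # Each frame decision covers hop_size / sample_rate seconds:
    frame_duration = VAD_HOP_SIZE / SAMPLE_RATE  # 256 / 16000 = 0.016 s
    # Frame i spans [i * frame_duration, (i + 1) * frame_duration), so all
    # VAD-derived boundaries were quantized to 16 ms steps.
    assert abs(frame_duration - 0.016) < 1e-9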
@@ -57,135 +51,6 @@ class ForcedAligner:
         cls._dictionary = {c: i for i, c in enumerate(cls._labels)}
         return cls._model, cls._labels, cls._dictionary
 
-    @classmethod
-    def _get_vad_model(cls):
-        """Lazy-load TEN-VAD model (singleton)."""
-        if cls._vad_model is None:
-            from ten_vad import TenVad
-
-            cls._vad_model = TenVad(hop_size=cls.VAD_HOP_SIZE, threshold=cls.VAD_THRESHOLD)
-        return cls._vad_model
-
-    @classmethod
-    def _get_speech_regions(
-        cls, audio: np.ndarray, sample_rate: int = 16000
-    ) -> list[tuple[float, float]]:
-        """Get speech regions using TEN-VAD.
-
-        Args:
-            audio: Audio waveform as numpy array
-            sample_rate: Audio sample rate
-
-        Returns:
-            List of (start_time, end_time) tuples for speech regions
-        """
-        vad_model = cls._get_vad_model()
-
-        # Convert to int16 as required by TEN-VAD
-        if audio.dtype != np.int16:
-            audio_int16 = (np.clip(audio, -1.0, 1.0) * 32767).astype(np.int16)
-        else:
-            audio_int16 = audio
-
-        # Process frame by frame
-        hop_size = cls.VAD_HOP_SIZE
-        frame_duration = hop_size / sample_rate
-        speech_frames: list[bool] = []
-
-        for i in range(0, len(audio_int16) - hop_size, hop_size):
-            frame = audio_int16[i : i + hop_size]
-            _, is_speech = vad_model.process(frame)
-            speech_frames.append(is_speech)
-
-        # Convert frame-level decisions to segments
-        segments: list[tuple[float, float]] = []
-        in_speech = False
-        start_idx = 0
-
-        for i, is_speech in enumerate(speech_frames):
-            if is_speech and not in_speech:
-                start_idx = i
-                in_speech = True
-            elif not is_speech and in_speech:
-                start_time = start_idx * frame_duration
-                end_time = i * frame_duration
-                segments.append((start_time, end_time))
-                in_speech = False
-
-        # Handle trailing speech
-        if in_speech:
-            start_time = start_idx * frame_duration
-            end_time = len(speech_frames) * frame_duration
-            segments.append((start_time, end_time))
-
-        # Merge segments with small gaps
-        return cls._merge_speech_segments(segments)
-
-    @classmethod
-    def _merge_speech_segments(
-        cls, segments: list[tuple[float, float]]
-    ) -> list[tuple[float, float]]:
-        """Merge speech segments with small gaps."""
-        if not segments:
-            return segments
-
-        merged: list[tuple[float, float]] = [segments[0]]
-        for start, end in segments[1:]:
-            prev_start, prev_end = merged[-1]
-            if start - prev_end <= cls.VAD_MAX_GAP:
-                merged[-1] = (prev_start, end)
-            else:
-                merged.append((start, end))
-        return merged
-
-    @classmethod
-    def _is_in_speech(cls, time: float, speech_regions: list[tuple[float, float]]) -> bool:
-        """Check if a timestamp falls within any speech region."""
-        return any(start <= time <= end for start, end in speech_regions)
-
-    @classmethod
-    def _find_nearest_speech_boundary(
-        cls, time: float, speech_regions: list[tuple[float, float]], direction: str = "any"
-    ) -> float:
-        """Find the nearest speech region boundary to a timestamp.
-
-        Args:
-            time: Timestamp to find boundary for
-            speech_regions: List of (start, end) speech regions
-            direction: "start" for word starts, "end" for word ends, "any" for closest
-
-        Returns:
-            Adjusted timestamp snapped to nearest speech boundary
-        """
-        if not speech_regions:
-            return time
-
-        best_time = time
-        min_dist = float("inf")
-
-        for start, end in speech_regions:
-            # If time is inside this region, return as-is
-            if start <= time <= end:
-                return time
-
-            # Check distance to boundaries
-            if direction in ("start", "any"):
-                dist = abs(time - start)
-                if dist < min_dist:
-                    min_dist = dist
-                    best_time = start
-
-            if direction in ("end", "any"):
-                dist = abs(time - end)
-                if dist < min_dist:
-                    min_dist = dist
-                    best_time = end
-
-        return best_time
-
-    # Confidence threshold for alignment scores (log probability)
-    MIN_CONFIDENCE = -5.0  # Tokens with scores below this are considered low-confidence
-
     @classmethod
     def align(
         cls,
@@ -194,7 +59,6 @@ class ForcedAligner:
         sample_rate: int = 16000,
         _language: str = "eng",
         _batch_size: int = 16,
-        use_vad: bool = True,
     ) -> list[dict]:
         """Align transcript to audio and return word-level timestamps.
 
@@ -204,10 +68,9 @@ class ForcedAligner:
             sample_rate: Audio sample rate (default 16000)
             _language: ISO-639-3 language code (default "eng" for English, unused)
             _batch_size: Batch size for alignment model (unused)
-            use_vad: If True, use VAD to refine word boundaries (default True)
 
         Returns:
-            List of dicts with 'word', 'start', 'end', 'confidence' keys
+            List of dicts with 'word', 'start', 'end' keys
         """
         import torchaudio
         from torchaudio.functional import forced_align, merge_tokens
@@ -215,11 +78,6 @@ class ForcedAligner:
         device = _get_device()
         model, labels, dictionary = cls.get_instance(device)
 
-        # Step 1: Get speech regions using VAD (before any processing)
-        speech_regions = []
-        if use_vad:
-            speech_regions = cls._get_speech_regions(audio, sample_rate)
-
         # Convert audio to tensor (copy to ensure array is writable)
         if isinstance(audio, np.ndarray):
             waveform = torch.from_numpy(audio.copy()).float()
@@ -272,122 +130,43 @@ class ForcedAligner:
         frame_duration = 320 / cls._bundle.sample_rate
 
         # Group token spans into words based on pipe separator
-        # Track confidence scores per word
         words = text.split()
         word_timestamps = []
         current_word_start = None
         current_word_end = None
-        current_word_scores: list[float] = []
         word_idx = 0
 
         for span in token_spans:
             token_char = labels[span.token]
             if token_char == "|":  # Word separator
                 if current_word_start is not None and word_idx < len(words):
-                    # Calculate word confidence as mean of token scores
-                    confidence = (
-                        sum(current_word_scores) / len(current_word_scores)
-                        if current_word_scores
-                        else 0.0
-                    )
                     word_timestamps.append(
                         {
                             "word": words[word_idx],
                             "start": current_word_start * frame_duration,
                             "end": current_word_end * frame_duration,
-                            "confidence": confidence,
                         }
                     )
                     word_idx += 1
                     current_word_start = None
                     current_word_end = None
-                    current_word_scores = []
             else:
                 if current_word_start is None:
                     current_word_start = span.start
                 current_word_end = span.end
-                current_word_scores.append(span.score)
 
         # Don't forget the last word
         if current_word_start is not None and word_idx < len(words):
-            confidence = (
-                sum(current_word_scores) / len(current_word_scores) if current_word_scores else 0.0
-            )
             word_timestamps.append(
                 {
                     "word": words[word_idx],
                     "start": current_word_start * frame_duration,
                     "end": current_word_end * frame_duration,
-                    "confidence": confidence,
                 }
             )
 
-        # Step 2: Refine timestamps using VAD
-        if use_vad and speech_regions:
-            word_timestamps = cls._refine_with_vad(word_timestamps, speech_regions)
-
         return word_timestamps
 
-    @classmethod
-    def _refine_with_vad(
-        cls, word_timestamps: list[dict], speech_regions: list[tuple[float, float]]
-    ) -> list[dict]:
-        """Refine word timestamps using VAD speech regions.
-
-        - Low-confidence words that fall outside speech regions are snapped into speech
-        - Word boundaries are snapped to speech region boundaries when close
-
-        Args:
-            word_timestamps: List of word dicts with 'start', 'end', 'confidence'
-            speech_regions: List of (start, end) speech regions
-
-        Returns:
-            Refined word timestamps
-        """
-        if not word_timestamps or not speech_regions:
-            return word_timestamps
-
-        refined = []
-        for word in word_timestamps:
-            start = word["start"]
-            end = word["end"]
-            confidence = word.get("confidence", 0.0)
-
-            # Check if word midpoint is in a speech region
-            midpoint = (start + end) / 2
-            in_speech = cls._is_in_speech(midpoint, speech_regions)
-
-            # For low-confidence words outside speech, snap to the nearest speech boundary
-            if not in_speech and confidence < cls.MIN_CONFIDENCE:
-                start = cls._find_nearest_speech_boundary(start, speech_regions, "start")
-                end = cls._find_nearest_speech_boundary(end, speech_regions, "end")
-                # Ensure start < end
-                if start >= end:
-                    end = start + 0.01
-
-            # For words near speech boundaries, snap to the boundary;
-            # this aligns word edges with actual speech onset/offset
-            snap_threshold = 0.05  # 50 ms
-            for region_start, region_end in speech_regions:
-                # Snap start to speech region start if close
-                if abs(start - region_start) < snap_threshold:
-                    start = region_start
-                # Snap end to speech region end if close
-                if abs(end - region_end) < snap_threshold:
-                    end = region_end
-
-            refined.append(
-                {
-                    "word": word["word"],
-                    "start": start,
-                    "end": end,
-                    "confidence": confidence,
-                }
-            )
-
-        return refined
-
 
 try:
     from .diarization import SpeakerDiarizer
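
With the VAD pass gone, align is a plain forced-alignment call: CTC emission frames sit 320 samples apart (frame_duration = 320 / 16000 = 20 ms at the bundle's 16 kHz), token spans are grouped into words at the "|" separator, and each result dict carries only 'word', 'start', and 'end'. A minimal usage sketch (an assumption-laden illustration, not part of the commit: it takes the positional parameters before sample_rate to be the audio array and transcript string, as the body's use of audio and text suggests, and feeds a silent stand-in waveform rather than real audio):

    import numpy as np

    # Hypothetical stand-in for a real 16 kHz mono float32 recording.
    audio = np.zeros(16000, dtype=np.float32)
    words = ForcedAligner.align(audio, "hello world", sample_rate=16000)
    for w in words:
        # After this commit there is no 'confidence' key.
        print(f"{w['word']}: {w['start']:.3f}s to {w['end']:.3f}s")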
 
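Callers that relied on the removed VAD snapping can reproduce it outside the class. A standalone sketch under the deleted code's own assumptions (speech regions as (start, end) pairs in seconds, e.g. from TEN-VAD; the 0.15 s merge gap and 50 ms snap threshold mirror the deleted VAD_MAX_GAP and snap_threshold; function names here are hypothetical):

    def merge_regions(regions: list[tuple[float, float]], max_gap: float = 0.15) -> list[tuple[float, float]]:
        """Merge speech regions separated by gaps of at most max_gap seconds."""
        merged: list[tuple[float, float]] = []
        for start, end in regions:
            if merged and start - merged[-1][1] <= max_gap:
                merged[-1] = (merged[-1][0], end)
            else:
                merged.append((start, end))
        return merged

    def snap_words(words: list[dict], regions: list[tuple[float, float]], snap: float = 0.05) -> list[dict]:
        """Snap word edges to nearby speech-region boundaries (within snap seconds)."""
        out = []
        for w in words:
            start, end = w["start"], w["end"]
            for r_start, r_end in regions:
                if abs(start - r_start) < snap:
                    start = r_start
                if abs(end - r_end) < snap:
                    end = r_end
            out.append({**w, "start": start, "end": end})
        return out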