david committed on
Commit
352d0e5
·
1 Parent(s): 9608722

fix vad bug

Browse files
transcribe/pipelines/pipe_vad.py CHANGED
@@ -42,10 +42,12 @@ class VadPipe(BasePipe):
42
  source_audio = in_data.source_audio
43
  source_audio = np.frombuffer(source_audio, dtype=np.float32)
44
  send_audio = b""
45
- speech_timestamps = get_speech_timestamps(source_audio, self.model.silero_vad, sampling_rate=16000)
 
46
  if speech_timestamps:
47
  send_audio = collect_chunks(speech_timestamps, torch.Tensor(source_audio))
48
  send_audio = send_audio.numpy()
 
49
  # send_audio = self.reduce_noise(send_audio).tobytes()
50
  in_data.source_audio = b""
51
  return in_data
 
42
  source_audio = in_data.source_audio
43
  source_audio = np.frombuffer(source_audio, dtype=np.float32)
44
  send_audio = b""
45
+ speech_timestamps = get_speech_timestamps(torch.Tensor(source_audio), self.model.silero_vad, sampling_rate=16000)
46
+
47
  if speech_timestamps:
48
  send_audio = collect_chunks(speech_timestamps, torch.Tensor(source_audio))
49
  send_audio = send_audio.numpy()
50
+ in_data.audio = send_audio
51
  # send_audio = self.reduce_noise(send_audio).tobytes()
52
  in_data.source_audio = b""
53
  return in_data
transcribe/whisper_llm_serve.py CHANGED
@@ -68,9 +68,8 @@ class PyWhiperCppServe(ServeClientBase):
68
  with self.lock:
69
  frame = self.frames_np.copy()
70
  item = self._translate_pipes.voice_detect(frame.tobytes())
71
- if item.audio != b'':
72
- frame_np = np.frombuffer(item.audio, dtype=np.float32)
73
- self.frames_np = frame_np.copy()
74
 
75
 
76
  def get_frame_from_queue(self,):
@@ -103,6 +102,7 @@ class PyWhiperCppServe(ServeClientBase):
103
 
104
  item = self._translate_pipes.transcrible(audio_buffer.tobytes(), self.language)
105
  segments = item.segments
 
106
  log_block("Whisper transcrible time", f"{(time.perf_counter() - start_time):.3f}", "s")
107
 
108
  return segments
@@ -184,6 +184,7 @@ class PyWhiperCppServe(ServeClientBase):
184
  if last_cut_index:
185
  self.update_audio_buffer(last_cut_index)
186
  # 句子或者短句的提交
 
187
  self._segment_manager.handle(left_string).commit(is_end_sentence)
188
  self._segment_manager.handle(right_string)
189
 
 
68
  with self.lock:
69
  frame = self.frames_np.copy()
70
  item = self._translate_pipes.voice_detect(frame.tobytes())
71
+ frame_np = np.frombuffer(item.audio, dtype=np.float32)
72
+ self.frames_np = frame_np.copy()
 
73
 
74
 
75
  def get_frame_from_queue(self,):
 
102
 
103
  item = self._translate_pipes.transcrible(audio_buffer.tobytes(), self.language)
104
  segments = item.segments
105
+ log_block("Whisper transcrible out", f"{''.join(seg.text for seg in segments)}", "")
106
  log_block("Whisper transcrible time", f"{(time.perf_counter() - start_time):.3f}", "s")
107
 
108
  return segments
 
184
  if last_cut_index:
185
  self.update_audio_buffer(last_cut_index)
186
  # 句子或者短句的提交
187
+ log_block("Whisper string lock ", f"{left_string}",)
188
  self._segment_manager.handle(left_string).commit(is_end_sentence)
189
  self._segment_manager.handle(right_string)
190