daihui.zhang committed on
Commit ·
c4470f1
1
Parent(s): 6696134
vad parameters v1 test
Browse files- transcribe/pipelines/pipe_vad.py +18 -17
- transcribe/strategy.py +1 -1
- transcribe/whisper_llm_serve.py +6 -6
transcribe/pipelines/pipe_vad.py
CHANGED
|
@@ -29,7 +29,7 @@ def collect_chunks(tss: List[dict], wav: torch.Tensor, sample_rate: int = 16000)
|
|
| 29 |
def collect_chunks_improved(tss: List[dict], wav: torch.Tensor, sample_rate: int = 16000):
|
| 30 |
chunks = []
|
| 31 |
silent_samples = int(0.3 * sample_rate) # 300ms 的静音样本数
|
| 32 |
-
silence = torch.zeros(silent_samples) # 创建300ms的静音
|
| 33 |
min_gap_samples = int(0.1 * sample_rate) # 最小间隔阈值 (100ms)
|
| 34 |
|
| 35 |
# 对时间戳进行简单的平滑处理
|
|
@@ -74,22 +74,22 @@ class VadPipe(BasePipe):
|
|
| 74 |
def init(cls):
|
| 75 |
if cls.model is None:
|
| 76 |
cls.model = SileroVADProcessor(
|
| 77 |
-
activate_threshold=0.
|
| 78 |
fusion_threshold=0.45, # 提高以更好地融合语音片段
|
| 79 |
min_speech_duration=0.2, # 略微降低以捕获短音节
|
| 80 |
max_speech_duration=20, # 保持不变
|
| 81 |
-
min_silence_duration=
|
| 82 |
sample_rate=cls.sample_rate # 采样率,音频信号的采样频率
|
| 83 |
)
|
| 84 |
-
cls.vac = FixedVADIterator(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 85 |
cls.vac.reset_states()
|
| 86 |
|
| 87 |
-
|
| 88 |
-
def get_previous_buffer(self):
|
| 89 |
-
if len(self.previous_buffer) == 2:
|
| 90 |
-
return self.previous_buffer[-1]
|
| 91 |
-
return np.array([], dtype=np.float32)
|
| 92 |
-
|
| 93 |
|
| 94 |
# def reduce_noise(self, data):
|
| 95 |
# return nr.reduce_noise(y=data, sr=self.sample_rate)
|
|
@@ -109,10 +109,10 @@ class VadPipe(BasePipe):
|
|
| 109 |
def process(self, in_data: MetaItem) -> MetaItem:
|
| 110 |
if self._offset == 0:
|
| 111 |
self.vac.reset_states()
|
| 112 |
-
|
| 113 |
source_audio = np.frombuffer(in_data.source_audio, dtype=np.float32)
|
| 114 |
speech_data = self._process_speech_chunk(source_audio)
|
| 115 |
-
|
| 116 |
if speech_data: # 表示有音频的变化点出现
|
| 117 |
rel_start_frame, rel_end_frame = speech_data
|
| 118 |
if rel_start_frame and not rel_end_frame:
|
|
@@ -130,16 +130,17 @@ class VadPipe(BasePipe):
|
|
| 130 |
else:
|
| 131 |
self._status = 'END'
|
| 132 |
target_audio = np.array([],dtype=np.float32)
|
| 133 |
-
logging.debug("❌ No valid speech segment detected, setting status to END")
|
| 134 |
else:
|
| 135 |
if self._status == 'START':
|
| 136 |
target_audio = source_audio
|
| 137 |
-
logging.debug("🔊 Continuing to capture audio as speech is still ongoing")
|
| 138 |
else: # end
|
| 139 |
target_audio = np.array([],dtype=np.float32)
|
| 140 |
-
self._status = 'END'
|
| 141 |
-
logging.debug("❌ No speech detected, setting status to END")
|
| 142 |
-
|
|
|
|
| 143 |
|
| 144 |
in_data.audio = target_audio.tobytes()
|
| 145 |
in_data.source_audio = b''
|
|
|
|
| 29 |
def collect_chunks_improved(tss: List[dict], wav: torch.Tensor, sample_rate: int = 16000):
|
| 30 |
chunks = []
|
| 31 |
silent_samples = int(0.3 * sample_rate) # 300ms 的静音样本数
|
| 32 |
+
# silence = torch.zeros(silent_samples) # 创建300ms的静音
|
| 33 |
min_gap_samples = int(0.1 * sample_rate) # 最小间隔阈值 (100ms)
|
| 34 |
|
| 35 |
# 对时间戳进行简单的平滑处理
|
|
|
|
| 74 |
def init(cls):
|
| 75 |
if cls.model is None:
|
| 76 |
cls.model = SileroVADProcessor(
|
| 77 |
+
activate_threshold=0.3, # 降低以捕获更多音频
|
| 78 |
fusion_threshold=0.45, # 提高以更好地融合语音片段
|
| 79 |
min_speech_duration=0.2, # 略微降低以捕获短音节
|
| 80 |
max_speech_duration=20, # 保持不变
|
| 81 |
+
min_silence_duration=500, # 增加到300毫秒,允许说话间的自然停顿
|
| 82 |
sample_rate=cls.sample_rate # 采样率,音频信号的采样频率
|
| 83 |
)
|
| 84 |
+
cls.vac = FixedVADIterator(
|
| 85 |
+
cls.model.silero_vad,
|
| 86 |
+
sampling_rate=cls.sample_rate,
|
| 87 |
+
threshold=0.3,
|
| 88 |
+
speech_pad_ms=10
|
| 89 |
+
)
|
| 90 |
cls.vac.reset_states()
|
| 91 |
|
| 92 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
|
| 94 |
# def reduce_noise(self, data):
|
| 95 |
# return nr.reduce_noise(y=data, sr=self.sample_rate)
|
|
|
|
| 109 |
def process(self, in_data: MetaItem) -> MetaItem:
|
| 110 |
if self._offset == 0:
|
| 111 |
self.vac.reset_states()
|
| 112 |
+
silence_audio_100ms = np.zeros(int(0.1*self.sample_rate))
|
| 113 |
source_audio = np.frombuffer(in_data.source_audio, dtype=np.float32)
|
| 114 |
speech_data = self._process_speech_chunk(source_audio)
|
| 115 |
+
|
| 116 |
if speech_data: # 表示有音频的变化点出现
|
| 117 |
rel_start_frame, rel_end_frame = speech_data
|
| 118 |
if rel_start_frame and not rel_end_frame:
|
|
|
|
| 130 |
else:
|
| 131 |
self._status = 'END'
|
| 132 |
target_audio = np.array([],dtype=np.float32)
|
| 133 |
+
# logging.debug("❌ No valid speech segment detected, setting status to END")
|
| 134 |
else:
|
| 135 |
if self._status == 'START':
|
| 136 |
target_audio = source_audio
|
| 137 |
+
# logging.debug("🔊 Continuing to capture audio as speech is still ongoing")
|
| 138 |
else: # end
|
| 139 |
target_audio = np.array([],dtype=np.float32)
|
| 140 |
+
# self._status = 'END'
|
| 141 |
+
# logging.debug("❌ No speech detected, setting status to END")
|
| 142 |
+
|
| 143 |
+
self._offset += len(source_audio)
|
| 144 |
|
| 145 |
in_data.audio = target_audio.tobytes()
|
| 146 |
in_data.source_audio = b''
|
transcribe/strategy.py
CHANGED
|
@@ -111,7 +111,7 @@ class TranscriptChunk:
|
|
| 111 |
return 0
|
| 112 |
|
| 113 |
score = self._calculate_similarity(self.join(), chunk.join())
|
| 114 |
-
logger.debug(f"Compare: {self.join()} vs {chunk.join()} : {score}")
|
| 115 |
return score
|
| 116 |
|
| 117 |
def only_punctuation(self)->bool:
|
|
|
|
| 111 |
return 0
|
| 112 |
|
| 113 |
score = self._calculate_similarity(self.join(), chunk.join())
|
| 114 |
+
# logger.debug(f"Compare: {self.join()} vs {chunk.join()} : {score}")
|
| 115 |
return score
|
| 116 |
|
| 117 |
def only_punctuation(self)->bool:
|
transcribe/whisper_llm_serve.py
CHANGED
|
@@ -159,15 +159,15 @@ class WhisperTranscriptionService:
|
|
| 159 |
frames = frame_np.copy()
|
| 160 |
|
| 161 |
# 音频过短时的处理
|
| 162 |
-
if len(frames) <
|
| 163 |
# 极短音频段,清空并返回None
|
| 164 |
# self._update_audio_buffer(len(frames))
|
| 165 |
return None
|
| 166 |
-
if len(frames) < self.sample_rate:
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
|
| 172 |
return frames
|
| 173 |
|
|
|
|
| 159 |
frames = frame_np.copy()
|
| 160 |
|
| 161 |
# 音频过短时的处理
|
| 162 |
+
if len(frames) < self.sample_rate:
|
| 163 |
# 极短音频段,清空并返回None
|
| 164 |
# self._update_audio_buffer(len(frames))
|
| 165 |
return None
|
| 166 |
+
# if len(frames) < self.sample_rate:
|
| 167 |
+
# # 不足一秒的音频,补充静音
|
| 168 |
+
# silence_audio = np.zeros((self.sample_rate + 1000,), dtype=np.float32)
|
| 169 |
+
# silence_audio[-len(frames):] = frames
|
| 170 |
+
# return silence_audio.copy()
|
| 171 |
|
| 172 |
return frames
|
| 173 |
|