Xin Zhang
committed on
Commit
·
1b8024b
1
Parent(s):
632b469
[fix]: whisper_full_with_state: input is too short - 990 ms < 1000 ms. consider padding the input audio with silence.
Browse files- transcribe/serve.py +17 -7
transcribe/serve.py
CHANGED
|
@@ -92,7 +92,7 @@ class WhisperTranscriptionService:
|
|
| 92 |
self.full_segments_queue.appendleft(audio_array) # 根据时间是否满足三秒长度 来整合音频块
|
| 93 |
self.frames_np_start_timestamp = time.time()
|
| 94 |
self.frames_np = np.array([], dtype=np.float32)
|
| 95 |
-
|
| 96 |
# 音频结束信号的时候 整合当前缓冲区
|
| 97 |
# START -- END -- START -- END 通常
|
| 98 |
# START -- END -- END end块带有音频信息的通常是4096内断的一个短音
|
|
@@ -105,7 +105,7 @@ class WhisperTranscriptionService:
|
|
| 105 |
self.frames_np = np.array([], dtype=np.float32)
|
| 106 |
else:
|
| 107 |
logger.debug(f"🥳 当前时间与上一句的时间差: {time_diff:.2f}s,继续保留在缓冲区")
|
| 108 |
-
|
| 109 |
except queue.Empty:
|
| 110 |
pass
|
| 111 |
|
|
@@ -114,7 +114,7 @@ class WhisperTranscriptionService:
|
|
| 114 |
frame_epoch = 1
|
| 115 |
|
| 116 |
while not self._stop.is_set():
|
| 117 |
-
|
| 118 |
if len(self.frames_np) ==0:
|
| 119 |
time.sleep(0.1)
|
| 120 |
continue
|
|
@@ -126,11 +126,21 @@ class WhisperTranscriptionService:
|
|
| 126 |
else:
|
| 127 |
audio_buffer = self.frames_np[:int(frame_epoch * 1.5 * self.sample_rate)].copy()# 获取 1.5s * epoch 个音频长度
|
| 128 |
partial = True
|
| 129 |
-
|
| 130 |
if len(audio_buffer) < int(self.sample_rate):
|
| 131 |
-
|
| 132 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 133 |
audio_buffer = silence_audio
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 134 |
|
| 135 |
logger.debug(f"audio buffer size: {len(audio_buffer) / self.sample_rate:.2f}s")
|
| 136 |
meta_item = self._transcribe_audio(audio_buffer)
|
|
@@ -177,7 +187,7 @@ class WhisperTranscriptionService:
|
|
| 177 |
|
| 178 |
result = self.translate_pipe.translate(text, self.source_language, self.target_language)
|
| 179 |
translated_text = result.translate_content
|
| 180 |
-
|
| 181 |
log_block("🐧 Translation out ", f"{translated_text}")
|
| 182 |
return translated_text
|
| 183 |
|
|
|
|
| 92 |
self.full_segments_queue.appendleft(audio_array) # 根据时间是否满足三秒长度 来整合音频块
|
| 93 |
self.frames_np_start_timestamp = time.time()
|
| 94 |
self.frames_np = np.array([], dtype=np.float32)
|
| 95 |
+
|
| 96 |
# 音频结束信号的时候 整合当前缓冲区
|
| 97 |
# START -- END -- START -- END 通常
|
| 98 |
# START -- END -- END end块带有音频信息的通常是4096内断的一个短音
|
|
|
|
| 105 |
self.frames_np = np.array([], dtype=np.float32)
|
| 106 |
else:
|
| 107 |
logger.debug(f"🥳 当前时间与上一句的时间差: {time_diff:.2f}s,继续保留在缓冲区")
|
| 108 |
+
|
| 109 |
except queue.Empty:
|
| 110 |
pass
|
| 111 |
|
|
|
|
| 114 |
frame_epoch = 1
|
| 115 |
|
| 116 |
while not self._stop.is_set():
|
| 117 |
+
|
| 118 |
if len(self.frames_np) ==0:
|
| 119 |
time.sleep(0.1)
|
| 120 |
continue
|
|
|
|
| 126 |
else:
|
| 127 |
audio_buffer = self.frames_np[:int(frame_epoch * 1.5 * self.sample_rate)].copy()# 获取 1.5s * epoch 个音频长度
|
| 128 |
partial = True
|
| 129 |
+
|
| 130 |
if len(audio_buffer) < int(self.sample_rate):
|
| 131 |
+
# Add a small buffer (e.g., 10ms worth of samples) to be safe
|
| 132 |
+
padding_samples = int(self.sample_rate * 0.01) # e.g., 160 samples for 10ms at 16kHz
|
| 133 |
+
target_length = self.sample_rate + padding_samples
|
| 134 |
+
silence_audio = np.zeros(target_length, dtype=np.float32)
|
| 135 |
+
# Ensure we don't try to copy more data than exists if audio_buffer is very short
|
| 136 |
+
copy_length = min(len(audio_buffer), target_length)
|
| 137 |
+
silence_audio[-copy_length:] = audio_buffer[-copy_length:] # Copy from the end of audio_buffer
|
| 138 |
audio_buffer = silence_audio
|
| 139 |
+
elif len(audio_buffer) > self.sample_rate * config.MAX_SPEECH_DURATION_S:
|
| 140 |
+
# If buffer is too long even without padding, truncate it (optional, depends on desired behavior)
|
| 141 |
+
# This case might already be handled elsewhere, but good to consider
|
| 142 |
+
audio_buffer = audio_buffer[:int(self.sample_rate * config.MAX_SPEECH_DURATION_S)]
|
| 143 |
+
|
| 144 |
|
| 145 |
logger.debug(f"audio buffer size: {len(audio_buffer) / self.sample_rate:.2f}s")
|
| 146 |
meta_item = self._transcribe_audio(audio_buffer)
|
|
|
|
| 187 |
|
| 188 |
result = self.translate_pipe.translate(text, self.source_language, self.target_language)
|
| 189 |
translated_text = result.translate_content
|
| 190 |
+
|
| 191 |
log_block("🐧 Translation out ", f"{translated_text}")
|
| 192 |
return translated_text
|
| 193 |
|