liumaolin
committed on
Commit
·
2c7e742
1
Parent(s):
e3d17e2
更新音频任务处理逻辑,使用模型副本替代原始任务对象,以确保数据一致性和完整性。
Browse files
src/voice_dialogue/services/audio/generator.py
CHANGED
|
@@ -103,4 +103,4 @@ class TTSAudioGenerator(BaseThread, TaskStatusMixin):
|
|
| 103 |
voice_task.tts_generated_sentence_audio = tts_generated_sentence_audio
|
| 104 |
voice_task.tts_end_time = time.time()
|
| 105 |
|
| 106 |
-
self.audio_output_queue.put(voice_task)
|
|
|
|
| 103 |
voice_task.tts_generated_sentence_audio = tts_generated_sentence_audio
|
| 104 |
voice_task.tts_end_time = time.time()
|
| 105 |
|
| 106 |
+
self.audio_output_queue.put(voice_task.model_copy())
|
src/voice_dialogue/services/speech/monitor.py
CHANGED
|
@@ -291,7 +291,7 @@ class SpeechStateMonitor(BaseThread):
|
|
| 291 |
# 8. 检查是否需要发送语音任务
|
| 292 |
if self._should_send_voice_task(is_audio_sent_for_processing):
|
| 293 |
voice_task = self._create_voice_task(audio_frames)
|
| 294 |
-
self.user_voice_queue.put(voice_task)
|
| 295 |
|
| 296 |
# 更新状态
|
| 297 |
is_audio_sent_for_processing = True
|
|
|
|
| 291 |
# 8. 检查是否需要发送语音任务
|
| 292 |
if self._should_send_voice_task(is_audio_sent_for_processing):
|
| 293 |
voice_task = self._create_voice_task(audio_frames)
|
| 294 |
+
self.user_voice_queue.put(voice_task.model_copy(deep=True))
|
| 295 |
|
| 296 |
# 更新状态
|
| 297 |
is_audio_sent_for_processing = True
|
src/voice_dialogue/services/speech/recognizer.py
CHANGED
|
@@ -38,7 +38,6 @@ class ASRWorker(BaseThread, PerformanceLogMixin):
|
|
| 38 |
except Empty:
|
| 39 |
continue
|
| 40 |
|
| 41 |
-
|
| 42 |
voice_task.language = self.language
|
| 43 |
voice_task.whisper_start_time = time.time()
|
| 44 |
|
|
@@ -47,16 +46,16 @@ class ASRWorker(BaseThread, PerformanceLogMixin):
|
|
| 47 |
if not transcribed_text.strip():
|
| 48 |
voice_state_manager.reset_task_id()
|
| 49 |
continue
|
| 50 |
-
|
| 51 |
self.log_task_user_question(voice_task)
|
| 52 |
|
| 53 |
voice_task.whisper_end_time = time.time()
|
| 54 |
|
| 55 |
task_id = voice_task.id
|
| 56 |
-
cached_user_question = self.cached_user_questions.get(task_id, [])
|
| 57 |
-
cached_user_question.append(transcribed_text)
|
| 58 |
|
|
|
|
| 59 |
if voice_task.is_over_audio_frames_threshold:
|
|
|
|
| 60 |
self.cached_user_questions[task_id] = cached_user_question
|
| 61 |
|
| 62 |
answer_id = voice_task.answer_id
|
|
@@ -72,4 +71,4 @@ class ASRWorker(BaseThread, PerformanceLogMixin):
|
|
| 72 |
voice_task.transcribed_text = ' '.join(cached_user_question) if cached_user_question else transcribed_text
|
| 73 |
|
| 74 |
voice_task.user_voice = []
|
| 75 |
-
self.transcribed_text_queue.put(voice_task)
|
|
|
|
| 38 |
except Empty:
|
| 39 |
continue
|
| 40 |
|
|
|
|
| 41 |
voice_task.language = self.language
|
| 42 |
voice_task.whisper_start_time = time.time()
|
| 43 |
|
|
|
|
| 46 |
if not transcribed_text.strip():
|
| 47 |
voice_state_manager.reset_task_id()
|
| 48 |
continue
|
| 49 |
+
|
| 50 |
self.log_task_user_question(voice_task)
|
| 51 |
|
| 52 |
voice_task.whisper_end_time = time.time()
|
| 53 |
|
| 54 |
task_id = voice_task.id
|
|
|
|
|
|
|
| 55 |
|
| 56 |
+
cached_user_question = self.cached_user_questions.get(task_id, [])
|
| 57 |
if voice_task.is_over_audio_frames_threshold:
|
| 58 |
+
cached_user_question.append(transcribed_text)
|
| 59 |
self.cached_user_questions[task_id] = cached_user_question
|
| 60 |
|
| 61 |
answer_id = voice_task.answer_id
|
|
|
|
| 71 |
voice_task.transcribed_text = ' '.join(cached_user_question) if cached_user_question else transcribed_text
|
| 72 |
|
| 73 |
voice_task.user_voice = []
|
| 74 |
+
self.transcribed_text_queue.put(voice_task.model_copy())
|