liumaolin
committed on
Commit
·
e80f558
1
Parent(s):
d691bbc
Add thread readiness checks and is_ready property across services
Browse files- src/VoiceDialogue/main.py +5 -0
- src/VoiceDialogue/services/audio/aec_audio_capture.py +2 -0
- src/VoiceDialogue/services/audio/audio_answer.py +12 -9
- src/VoiceDialogue/services/audio/audio_player.py +11 -8
- src/VoiceDialogue/services/core/base.py +12 -0
- src/VoiceDialogue/services/speech/asr_service.py +2 -0
- src/VoiceDialogue/services/speech/speech_monitor.py +2 -0
- src/VoiceDialogue/services/text/text_generator.py +2 -0
src/VoiceDialogue/main.py
CHANGED
|
@@ -1,3 +1,4 @@
|
|
|
|
|
| 1 |
import typing
|
| 2 |
from multiprocessing import Queue
|
| 3 |
from pathlib import Path
|
|
@@ -79,6 +80,10 @@ def launch_system(
|
|
| 79 |
audio_playing_worker = AudioStreamPlayer(audio_playing_queue=tts_generated_audio_queue)
|
| 80 |
audio_playing_worker.start()
|
| 81 |
threads.append(audio_playing_worker)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 82 |
# audio_frame_probe.start_record()
|
| 83 |
print(f'{"=" * 80}\n服务启动成功\n{"=" * 80}')
|
| 84 |
for thread in threads:
|
|
|
|
| 1 |
+
import time
|
| 2 |
import typing
|
| 3 |
from multiprocessing import Queue
|
| 4 |
from pathlib import Path
|
|
|
|
| 80 |
audio_playing_worker = AudioStreamPlayer(audio_playing_queue=tts_generated_audio_queue)
|
| 81 |
audio_playing_worker.start()
|
| 82 |
threads.append(audio_playing_worker)
|
| 83 |
+
|
| 84 |
+
while not all([thread.is_ready for thread in threads]):
|
| 85 |
+
time.sleep(0.1)
|
| 86 |
+
|
| 87 |
# audio_frame_probe.start_record()
|
| 88 |
print(f'{"=" * 80}\n服务启动成功\n{"=" * 80}')
|
| 89 |
for thread in threads:
|
src/VoiceDialogue/services/audio/aec_audio_capture.py
CHANGED
|
@@ -32,6 +32,8 @@ class EchoCancellingAudioCapture(BaseThread):
|
|
| 32 |
audio_recorder.freeAudioData.argtypes = [ctypes.POINTER(ctypes.c_ubyte)]
|
| 33 |
audio_recorder.startRecord()
|
| 34 |
|
|
|
|
|
|
|
| 35 |
try:
|
| 36 |
while not self.stopped():
|
| 37 |
size = ctypes.c_int(0)
|
|
|
|
| 32 |
audio_recorder.freeAudioData.argtypes = [ctypes.POINTER(ctypes.c_ubyte)]
|
| 33 |
audio_recorder.startRecord()
|
| 34 |
|
| 35 |
+
self.is_ready = True
|
| 36 |
+
|
| 37 |
try:
|
| 38 |
while not self.stopped():
|
| 39 |
size = ctypes.c_int(0)
|
src/VoiceDialogue/services/audio/audio_answer.py
CHANGED
|
@@ -23,15 +23,8 @@ class TTSAudioGenerator(BaseThread):
|
|
| 23 |
self.processed_answer_queue: Queue = processed_answer_queue
|
| 24 |
self.tts_generated_audio_queue: Queue = tts_generated_audio_queue
|
| 25 |
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
self.tts_module = TTSModule(tts_config)
|
| 30 |
-
self.tts_module.setup_inference_params(
|
| 31 |
-
ref_audio=voice_role.reference_audio_path,
|
| 32 |
-
parallel_infer=False,
|
| 33 |
-
**voice_role.inference_parameters
|
| 34 |
-
)
|
| 35 |
|
| 36 |
def setup_tts_config(self, device, voice_role: VoiceModel):
|
| 37 |
config = {
|
|
@@ -60,8 +53,18 @@ class TTSAudioGenerator(BaseThread):
|
|
| 60 |
|
| 61 |
def run(self):
|
| 62 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
self.warmup()
|
| 64 |
|
|
|
|
|
|
|
| 65 |
while not self.stopped():
|
| 66 |
try:
|
| 67 |
voice_task: VoiceTask = self.processed_answer_queue.get(block=False, timeout=0.1)
|
|
|
|
| 23 |
self.processed_answer_queue: Queue = processed_answer_queue
|
| 24 |
self.tts_generated_audio_queue: Queue = tts_generated_audio_queue
|
| 25 |
|
| 26 |
+
self._device = "cpu" # mps slower 11.66(cpu) vs 39.42(mps)
|
| 27 |
+
self._voice_role = voice_role
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
|
| 29 |
def setup_tts_config(self, device, voice_role: VoiceModel):
|
| 30 |
config = {
|
|
|
|
| 53 |
|
| 54 |
def run(self):
|
| 55 |
|
| 56 |
+
tts_config = self.setup_tts_config(self._device, self._voice_role)
|
| 57 |
+
|
| 58 |
+
self.tts_module = TTSModule(tts_config)
|
| 59 |
+
self.tts_module.setup_inference_params(
|
| 60 |
+
ref_audio=self._voice_role.reference_audio_path,
|
| 61 |
+
parallel_infer=False,
|
| 62 |
+
**self._voice_role.inference_parameters
|
| 63 |
+
)
|
| 64 |
self.warmup()
|
| 65 |
|
| 66 |
+
self.is_ready = True
|
| 67 |
+
|
| 68 |
while not self.stopped():
|
| 69 |
try:
|
| 70 |
voice_task: VoiceTask = self.processed_answer_queue.get(block=False, timeout=0.1)
|
src/VoiceDialogue/services/audio/audio_player.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
import tempfile
|
|
|
|
| 2 |
from collections import OrderedDict
|
| 3 |
from multiprocessing import Queue
|
| 4 |
from queue import Empty
|
|
@@ -23,6 +24,8 @@ class AudioStreamPlayer(BaseThread):
|
|
| 23 |
self.audio_playing_queue: Queue = audio_playing_queue
|
| 24 |
|
| 25 |
def run(self):
|
|
|
|
|
|
|
| 26 |
while not self.stopped():
|
| 27 |
|
| 28 |
try:
|
|
@@ -54,14 +57,14 @@ class AudioStreamPlayer(BaseThread):
|
|
| 54 |
if answer_id not in voice_state_manager.waiting_second_answer_mapping:
|
| 55 |
continue
|
| 56 |
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
|
| 66 |
self.update_chat_history(voice_task)
|
| 67 |
|
|
|
|
| 1 |
import tempfile
|
| 2 |
+
import time
|
| 3 |
from collections import OrderedDict
|
| 4 |
from multiprocessing import Queue
|
| 5 |
from queue import Empty
|
|
|
|
| 24 |
self.audio_playing_queue: Queue = audio_playing_queue
|
| 25 |
|
| 26 |
def run(self):
|
| 27 |
+
self.is_ready = True
|
| 28 |
+
|
| 29 |
while not self.stopped():
|
| 30 |
|
| 31 |
try:
|
|
|
|
| 57 |
if answer_id not in voice_state_manager.waiting_second_answer_mapping:
|
| 58 |
continue
|
| 59 |
|
| 60 |
+
now = time.time()
|
| 61 |
+
print(
|
| 62 |
+
f'整体耗时: {(now - voice_task.send_time):.2f}\n'
|
| 63 |
+
f'Whisper/FunASR 耗时: {(voice_task.whisper_end_time - voice_task.whisper_start_time):.2f}\n'
|
| 64 |
+
f'LLM 耗时: {(voice_task.llm_end_time - voice_task.llm_start_time):.2f}\n'
|
| 65 |
+
f'TTS generate sentence: {voice_task.answer_sentence}\n'
|
| 66 |
+
f'TTS 耗时: {(voice_task.tts_end_time - voice_task.tts_start_time):.2f}\n\n'
|
| 67 |
+
)
|
| 68 |
|
| 69 |
self.update_chat_history(voice_task)
|
| 70 |
|
src/VoiceDialogue/services/core/base.py
CHANGED
|
@@ -6,9 +6,21 @@ class BaseThread(threading.Thread):
|
|
| 6 |
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, *, daemon=None):
|
| 7 |
super().__init__(group, target, name, args, kwargs, daemon=daemon)
|
| 8 |
self._stop_event = threading.Event()
|
|
|
|
| 9 |
|
| 10 |
def stop(self):
|
| 11 |
self._stop_event.set()
|
| 12 |
|
| 13 |
def stopped(self):
|
| 14 |
return self._stop_event.is_set()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, *, daemon=None):
|
| 7 |
super().__init__(group, target, name, args, kwargs, daemon=daemon)
|
| 8 |
self._stop_event = threading.Event()
|
| 9 |
+
self._is_ready_event = threading.Event()
|
| 10 |
|
| 11 |
def stop(self):
|
| 12 |
self._stop_event.set()
|
| 13 |
|
| 14 |
def stopped(self):
|
| 15 |
return self._stop_event.is_set()
|
| 16 |
+
|
| 17 |
+
@property
|
| 18 |
+
def is_ready(self):
|
| 19 |
+
return self._is_ready_event.is_set()
|
| 20 |
+
|
| 21 |
+
@is_ready.setter
|
| 22 |
+
def is_ready(self, value: bool):
|
| 23 |
+
if value:
|
| 24 |
+
self._is_ready_event.set()
|
| 25 |
+
else:
|
| 26 |
+
self._is_ready_event.clear()
|
src/VoiceDialogue/services/speech/asr_service.py
CHANGED
|
@@ -182,6 +182,8 @@ class ASRWorker(BaseThread):
|
|
| 182 |
self.client = UnifiedASRClient(self.language)
|
| 183 |
self.client.warmup()
|
| 184 |
|
|
|
|
|
|
|
| 185 |
while not self.stopped():
|
| 186 |
voice_task: VoiceTask = self.user_voice_queue.get()
|
| 187 |
voice_task.language = self.language
|
|
|
|
| 182 |
self.client = UnifiedASRClient(self.language)
|
| 183 |
self.client.warmup()
|
| 184 |
|
| 185 |
+
self.is_ready = True
|
| 186 |
+
|
| 187 |
while not self.stopped():
|
| 188 |
voice_task: VoiceTask = self.user_voice_queue.get()
|
| 189 |
voice_task.language = self.language
|
src/VoiceDialogue/services/speech/speech_monitor.py
CHANGED
|
@@ -210,6 +210,8 @@ class SpeechStateMonitor(BaseThread):
|
|
| 210 |
主运行循环 - 监控语音状态并处理音频帧
|
| 211 |
"""
|
| 212 |
|
|
|
|
|
|
|
| 213 |
# 初始化状态变量
|
| 214 |
audio_frames = np.array([])
|
| 215 |
is_audio_sent_for_processing = False
|
|
|
|
| 210 |
主运行循环 - 监控语音状态并处理音频帧
|
| 211 |
"""
|
| 212 |
|
| 213 |
+
self.is_ready = True
|
| 214 |
+
|
| 215 |
# 初始化状态变量
|
| 216 |
audio_frames = np.array([])
|
| 217 |
is_audio_sent_for_processing = False
|
src/VoiceDialogue/services/text/text_generator.py
CHANGED
|
@@ -182,6 +182,8 @@ class LLMResponseGenerator(BaseThread):
|
|
| 182 |
pipeline = create_langchain_pipeline(self.model_instance, CHINESE_SYSTEM_PROMPT, self.get_session_history)
|
| 183 |
warmup_langchain_pipeline(pipeline)
|
| 184 |
|
|
|
|
|
|
|
| 185 |
"""主运行循环"""
|
| 186 |
while not self.stopped():
|
| 187 |
try:
|
|
|
|
| 182 |
pipeline = create_langchain_pipeline(self.model_instance, CHINESE_SYSTEM_PROMPT, self.get_session_history)
|
| 183 |
warmup_langchain_pipeline(pipeline)
|
| 184 |
|
| 185 |
+
self.is_ready = True
|
| 186 |
+
|
| 187 |
"""主运行循环"""
|
| 188 |
while not self.stopped():
|
| 189 |
try:
|