liumaolin
committed on
Commit
·
d205383
1
Parent(s):
eeb7fe5
为LLMResponseGenerator类添加TaskStatusMixin混入,以增强任务有效性检查功能,确保在生成回答时任务状态的有效性。
Browse files
src/voice_dialogue/services/text/generator.py
CHANGED
|
@@ -12,6 +12,7 @@ from voice_dialogue.config.user_config import get_prompt
|
|
| 12 |
from voice_dialogue.core.base import BaseThread
|
| 13 |
from voice_dialogue.core.constants import chat_history_cache
|
| 14 |
from voice_dialogue.models.voice_task import VoiceTask, QuestionDisplayMessage
|
|
|
|
| 15 |
from voice_dialogue.services.text.processor import (
|
| 16 |
preprocess_sentence_text, create_langchain_chat_llamacpp_instance,
|
| 17 |
create_langchain_pipeline, warmup_langchain_pipeline
|
|
@@ -19,7 +20,7 @@ from voice_dialogue.services.text.processor import (
|
|
| 19 |
from voice_dialogue.utils.logger import logger
|
| 20 |
|
| 21 |
|
| 22 |
-
class LLMResponseGenerator(BaseThread):
|
| 23 |
"""LLM 回答生成器 - 负责使用语言模型生成回答文本"""
|
| 24 |
|
| 25 |
def __init__(
|
|
@@ -152,6 +153,10 @@ class LLMResponseGenerator(BaseThread):
|
|
| 152 |
|
| 153 |
try:
|
| 154 |
for chunk in pipeline.stream(input={'input': user_question}, config=config):
|
|
|
|
|
|
|
|
|
|
|
|
|
| 155 |
if not chunk.content:
|
| 156 |
continue
|
| 157 |
elif chunk.content in {'<think>', '\n\n', '</think>'}:
|
|
|
|
| 12 |
from voice_dialogue.core.base import BaseThread
|
| 13 |
from voice_dialogue.core.constants import chat_history_cache
|
| 14 |
from voice_dialogue.models.voice_task import VoiceTask, QuestionDisplayMessage
|
| 15 |
+
from voice_dialogue.services.mixins import TaskStatusMixin
|
| 16 |
from voice_dialogue.services.text.processor import (
|
| 17 |
preprocess_sentence_text, create_langchain_chat_llamacpp_instance,
|
| 18 |
create_langchain_pipeline, warmup_langchain_pipeline
|
|
|
|
| 20 |
from voice_dialogue.utils.logger import logger
|
| 21 |
|
| 22 |
|
| 23 |
+
class LLMResponseGenerator(BaseThread, TaskStatusMixin):
|
| 24 |
"""LLM 回答生成器 - 负责使用语言模型生成回答文本"""
|
| 25 |
|
| 26 |
def __init__(
|
|
|
|
| 153 |
|
| 154 |
try:
|
| 155 |
for chunk in pipeline.stream(input={'input': user_question}, config=config):
|
| 156 |
+
|
| 157 |
+
if not self.is_task_valid(voice_task):
|
| 158 |
+
return
|
| 159 |
+
|
| 160 |
if not chunk.content:
|
| 161 |
continue
|
| 162 |
elif chunk.content in {'<think>', '\n\n', '</think>'}:
|