Upload text_inference.py with huggingface_hub
text_inference.py (added, +97 lines)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Text-only chat UI (文本对话UI界面).

Gradio front-end for plain-text conversations with the Medguide model
served through an OpenAI-compatible API endpoint.
"""
|
| 8 |
+
|
| 9 |
+
import argparse
|
| 10 |
+
import gradio as gr
|
| 11 |
+
from openai import OpenAI
|
| 12 |
+
|
| 13 |
+
# Default system prompt (Chinese). It defines the assistant persona
# ("Medguide", a medical model attributed to PKU-Alignment), its scope
# (medical Q&A and medical-report interpretation) and its answering style.
# NOTE(review): this is runtime text sent to the model on every request —
# do not translate or reformat it casually.
DEFAULT_SYSTEM_PROMPT = """\
你是一个高智能医学助手,专注于医学相关的问答、数据解析。你是由北大对齐小组(PKU-Alignment)开发的智能助手 医诊大模型(Medguide),基于DeepSeek-R1模型训练。你的目标是**尽可能满足**用户的医学需求,提供准确、专业的回答。
**能力范围**
1. 医学问答:
- 回答医学相关问题,包括疾病特征、诊断方法、治疗方案、药物机制、手术流程、最新医学研究等。
- 如果用户询问关于疾病诊断和治疗的相关内容,请充分分析并提供详细的回答。
2. 数据解析:
- 你可以读取并分析医学报告,包括血液检测、心电图(ECG)、病历摘要等。
- 你的任务是解释数据的含义,并提供可能的医学解读。
**回答风格**
- 医学精准:基于专业医学知识进行回答,确保内容严谨、科学。
- 有求必应:用户提出的任何问题,你都应该尽可能尝试回答。
"""
|
| 27 |
+
|
| 28 |
+
class TextAssistant:
    """Text-only chat client backed by an OpenAI-compatible endpoint."""

    def __init__(self, api_key="medguide", api_base="http://0.0.0.0:8231/v1"):
        self.model = 'medguide'
        self.client = OpenAI(api_key=api_key, base_url=api_base)

    def process_conversation(self, message, history):
        """Run one text-only chat turn and return the assistant's reply.

        Args:
            message: the new user input — a plain string, or a gradio
                multimodal payload dict carrying a 'text' key.
            history: prior turns as a list of {'role': ..., 'content': ...}
                dicts (gradio "messages" format).

        Returns:
            str: the model's answer, optionally reformatted to separate
            reasoning from the final answer.
        """
        # Rebuild the full message list: system prompt, prior turns, new question.
        messages = [{'role': 'system', 'content': DEFAULT_SYSTEM_PROMPT}]
        messages.extend(
            {'role': turn['role'], 'content': turn['content']} for turn in history
        )

        # Gradio may hand us either a plain string or a {'text': ...} payload.
        if isinstance(message, dict) and 'text' in message:
            question = message['text']
        else:
            question = message
        messages.append({'role': 'user', 'content': question})

        # Blocking (non-streaming) completion call.
        completion = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            stream=False,
        )
        answer = completion.choices[0].message.content

        # Pretty-print reasoning-style output when the model emits the marker;
        # trivially short reasoning (<= 5 chars) is left untouched.
        marker = "**Final Answer**"
        if marker in answer:
            reasoning, final_answer = answer.split(marker, 1)
            if len(reasoning) > 5:
                answer = f"""🤔 思考过程:\n```\n{reasoning.strip()}\n```\n\n✨ 最终答案:\n{final_answer.strip()}"""
        return answer
|
| 63 |
+
|
| 64 |
+
def create_ui(api_key="medguide", api_base="http://0.0.0.0:8231/v1"):
    """Build the Gradio Blocks UI for text-only Medguide chat.

    Args:
        api_key: API key forwarded to the OpenAI-compatible backend.
            Defaults match the original hard-coded TextAssistant values,
            so existing `create_ui()` callers are unaffected.
        api_base: base URL of the inference server.

    Returns:
        The assembled (un-launched) gr.Blocks demo.
    """
    # BUG FIX: previously TextAssistant() was built with hard-coded defaults,
    # silently ignoring the --api_key/--api_base CLI arguments.
    assistant = TextAssistant(api_key=api_key, api_base=api_base)

    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown("# Medguide Text Chat")
        gr.Markdown("Better life with Medguide(医诊大模型)纯文本对话版本")

        clear_btn = gr.Button("清除对话")

        chat_interface = gr.ChatInterface(
            fn=lambda message, history: assistant.process_conversation(message, history),
            type='messages',
            examples=[
                "什么是高血压?有哪些症状和治疗方法?",
                "请解释一下糖尿病的发病机制。",
                "心电图异常Q波通常提示什么问题?",
                "感冒和流感有什么区别?"
            ],
            title="医诊大模型对话",
            description="请输入您的医学相关问题"
        )

        # Reset the visible transcript when the user clicks the clear button.
        clear_btn.click(lambda: None, None, chat_interface.chatbot, queue=False)

    return demo


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="医学助手文本对话UI界面")
    parser.add_argument("--api_key", type=str, default="medguide")
    parser.add_argument("--api_base", type=str, default="http://0.0.0.0:8231/v1")
    # BUG FIX: `action="store_true"` with `default=True` made --share a no-op
    # (sharing could never be disabled). BooleanOptionalAction keeps the same
    # default and `--share` spelling, while also accepting `--no-share`.
    parser.add_argument("--share", default=True, action=argparse.BooleanOptionalAction)
    args = parser.parse_args()

    # Forward the CLI endpoint settings into the UI (see BUG FIX above).
    create_ui(api_key=args.api_key, api_base=args.api_base).launch(share=args.share)
|