File size: 6,983 Bytes
a9fb7e9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
import gradio as gr
from config import CHAT_SYSTEM_PROMPT_PLACEHOLDER, CHAT_MODEL_SPECS
from models import get_model_response
import logging
import copy

logger = logging.getLogger(__name__)

# --- Backend Logic ---

def handle_chat(message, history, system_prompt, temperature, model_id):
    """Handle a chat submission and stream the model's reply to the UI.

    Args:
        message: The user's new message text.
        history: Chat history as a list of (user, assistant) pairs, or
            ``None`` for a brand-new conversation.
        system_prompt: System prompt forwarded to the model backend.
        temperature: Sampling temperature forwarded to the model backend.
        model_id: Key into ``CHAT_MODEL_SPECS`` selecting the backend model.

    Yields:
        ``(history, "")`` — a deep copy of the updated history plus an empty
        string used to clear the input textbox on each streamed update.
    """
    logger.debug(f"handle_chat 输入: message={message}, history={history}, system_prompt={system_prompt}, temperature={temperature}, model_id={model_id}")
    if history is None:
        history = []
    # Work on a copy so the caller's state object is never mutated in place.
    history = copy.deepcopy(history)
    history.append((message, ""))
    # Yield once immediately so the user's message appears (and the input box
    # clears) even if the model stream turns out to be empty — previously an
    # empty stream meant this generator never yielded at all.
    yield copy.deepcopy(history), ""

    # Resolve the human-readable name for this model from its spec.
    model_display_name = CHAT_MODEL_SPECS.get(model_id, {}).get("display_name", model_id)

    is_first_chunk = True
    for chunk in get_model_response(model_id, history, system_prompt, temperature):
        if is_first_chunk:
            # Prefix the very first chunk with the model's display name.
            history[-1] = (message, f"**{model_display_name}**\n\n" + chunk)
            is_first_chunk = False
        else:
            history[-1] = (message, history[-1][1] + chunk)
        # Deep-copy on every yield so Gradio sees a fresh object each update.
        yield copy.deepcopy(history), ""

# --- UI Event Handlers ---

def handle_model_change(model_id):
    """Refresh the model description, scenario list, system prompt and
    message examples after the user selects a different model.

    Returns a 4-tuple of ``gr.update`` objects targeting, in order:
    model_description, scenario_selector, system_prompt,
    message_examples_display.
    """
    spec = CHAT_MODEL_SPECS[model_id]
    scenarios = spec.get("prompt_scenarios", [])

    if not scenarios:
        # Model ships without predefined scenarios: blank everything out.
        titles, examples, prompt = [], [], ""
    else:
        # Preload the first scenario as the default selection.
        default = scenarios[0]
        titles = [[entry["title"]] for entry in scenarios]
        examples = [[msg] for msg in default["message_examples"]]
        prompt = default["system_prompt"]

    return (
        gr.update(value=spec["description"]),
        gr.update(samples=titles),
        gr.update(value=prompt),
        gr.update(samples=examples)
    )

def handle_scenario_selection(model_id, evt: gr.SelectData):
    """When the user picks a scenario from the dataset, update the system
    prompt textbox and the message-example dataset to match it.

    Returns a pair of ``gr.update`` objects for (system_prompt,
    message_examples_display); both are no-op updates when the selection
    cannot be resolved.
    """
    logger.debug(f"--- Scenario Selection Event ---")
    logger.debug(f"Selected event value: {evt.value}")
    logger.debug(f"Type of event value: {type(evt.value)}")

    # evt.value arrives as a one-element list; pull out the title string.
    if isinstance(evt.value, list) and evt.value:
        selected_title = evt.value[0]
    else:
        selected_title = None
    if not selected_title:
        logger.error("Selected event value is not a valid list or is empty.")
        return gr.update(), gr.update()

    scenarios = CHAT_MODEL_SPECS[model_id].get("prompt_scenarios", [])

    available_titles = [entry['title'] for entry in scenarios]
    logger.debug(f"Available scenario titles for model '{model_id}': {available_titles}")

    # Linear scan for the scenario whose title matches the selection.
    for scenario in scenarios:
        if scenario["title"] == selected_title:
            logger.debug(f"Found matching scenario: '{selected_title}'")
            examples = [[msg] for msg in scenario["message_examples"]]
            return gr.update(value=scenario["system_prompt"]), gr.update(samples=examples)

    logger.warning(f"No matching scenario found for title: '{selected_title}'")
    # No match found: leave both components untouched.
    return gr.update(), gr.update()

# --- UI Creation ---

def create_chat_tab():
    """Build the chat tab's Gradio components and wire its event handlers.

    Returns:
        dict: The components the caller needs for further event wiring:
        ``chatbot``, ``chat_input``, ``send_button``, ``system_prompt``,
        ``temperature_slider``, ``model_selector``.
    """
    
    # Extract model info from the config for UI display.
    # `model_choices` is a list of (display_name, model_id) tuples.
    model_choices = [(spec["display_name"], model_id) for model_id, spec in CHAT_MODEL_SPECS.items()]
    # First key in CHAT_MODEL_SPECS is treated as the default model.
    default_model_id = list(CHAT_MODEL_SPECS.keys())[0]
    default_spec = CHAT_MODEL_SPECS[default_model_id]
    default_scenarios = default_spec.get("prompt_scenarios", [])
    
    with gr.TabItem("聊天", id="chat_tab"):
        with gr.Row():
            with gr.Column(scale=3):
                # NOTE(review): tuple-style history and `bubble_full_width`
                # are deprecated in newer Gradio releases — confirm the
                # targeted Gradio version supports them.
                chatbot = gr.Chatbot(
                    label="聊天窗口",
                    bubble_full_width=False,
                    height=500,
                    value=[(None, "Hello! I'm Ling. Try selecting a scenario and a message example below to get started.")]
                )
                with gr.Row():
                    chat_input = gr.Textbox(placeholder="Ask me anything...", label="输入框", show_label=False, scale=4)
                    send_button = gr.Button("发送", variant="primary", scale=1)

                # Scenario-based example area.
                with gr.Accordion("✨ 试试这些场景...", open=True):
                    # Scenario selector: shows one row per scenario title.
                    scenario_selector = gr.Dataset(
                        components=[gr.Textbox(visible=False)],
                        samples=[[s["title"]] for s in default_scenarios],
                        label="系统提示示例",
                        headers=["选择一个角色或任务来开始:"],
                    )
                    # Message examples for the currently selected scenario;
                    # defaults to the first scenario's examples when present.
                    message_examples_display = gr.Dataset(
                        components=[chat_input],
                        samples=[[m] for m in default_scenarios[0]["message_examples"]] if default_scenarios else [],
                        label="消息示例",
                        headers=["然后,试试这些具体问题:"],
                    )

            with gr.Column(scale=1):
                model_selector = gr.Radio(
                    choices=model_choices, 
                    label="选择模型", 
                    value=default_model_id
                )
                model_description = gr.Markdown(default_spec["description"])
                system_prompt = gr.Textbox(
                    label="System Prompt", 
                    lines=8, 
                    placeholder=CHAT_SYSTEM_PROMPT_PLACEHOLDER,
                    value=default_scenarios[0]["system_prompt"] if default_scenarios else ""
                )
                temperature_slider = gr.Slider(minimum=0.0, maximum=2.0, value=1.0, step=0.1, label="Temperature")

    # --- Event Listeners ---
    # Switching models refreshes description, scenarios, prompt and examples.
    model_selector.change(
        fn=handle_model_change,
        inputs=[model_selector],
        outputs=[model_description, scenario_selector, system_prompt, message_examples_display]
    )
    
    # Picking a scenario swaps in its system prompt and message examples.
    scenario_selector.select(
        fn=handle_scenario_selection,
        inputs=[model_selector],
        outputs=[system_prompt, message_examples_display]
    )

    # Clicking a message example copies its text into the input box.
    message_examples_display.click(
        fn=lambda value: value[0],
        inputs=[message_examples_display],
        outputs=[chat_input]
    )
    
    # Chat submit/send wiring is expected to happen in the caller using the
    # returned components.
    return {
        "chatbot": chatbot,
        "chat_input": chat_input,
        "send_button": send_button,
        "system_prompt": system_prompt,
        "temperature_slider": temperature_slider,
        "model_selector": model_selector,
    }