intersteller2887 committed on
Commit
8149dc4
·
verified ·
1 Parent(s): 2d868e0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +474 -473
app.py CHANGED
@@ -1,474 +1,475 @@
1
- import gradio as gr
2
- import os
3
- import json
4
- import pandas as pd
5
- from huggingface_hub import HfApi
6
-
7
- # ==============================================================================
8
- # 数据定义 (Data Definition)
9
- # ==============================================================================
10
# ------------------------------------------------------------------------------
# Static study data.
#
# DIMENSIONS_DATA: the five evaluation dimensions shown on the sample page and
# scored during the test. Each entry carries:
#   title     - dimension name (also used as the key in per-question selections)
#   audio     - sample clip path (all dimensions share the same sample file)
#   desc      - short text description shown next to the sample audio
#   sub_dims  - per-dimension feature prompts; each becomes a 0-5 slider label
#               and the key under which its score is stored
#   reference - HTML "answer key" markdown shown in the reference view
# ------------------------------------------------------------------------------
DIMENSIONS_DATA = [
    {
        "title": "语义和语用特征",
        "audio": "audio/sample1.wav",
        "desc": "这是“语义和语用特征”维度的文本描述示例。",
        # NOTE(review): the first prompt reads "正确并正确并延续并记忆并延续" —
        # looks like accidental duplication in the wording; confirm with the author.
        "sub_dims": [
            "记忆一致性:回应者是否能够正确并正确并延续并记忆并延续对话信息?是否存在对上下文的误解或不自洽?", "逻辑连贯性:回应者在语义与对话结构上保持前后一致、合乎逻辑?是否存在前后矛盾的情况?",
            "常见多音字处理:是否能再上下文中正确使用常见多音字?", "多语言混杂:是否存在自然的语言切换现象?如中英混杂、文化化表达。",
            "语言不精确性:是否出现打断、自纠正等人类似语言行为?是否存在如“差不多”、“可能吧”这类表达不确定性的用法?", "填充词使用:如“呃”、“嗯”等自然语流中的停顿或过渡词,使用是否得体且自然?",
            "隐喻与语用用意:是否展现出复杂的语用功能(如讽刺、劝阻、暗示等),以及对活在含义层次的理解能力?"
        ],
        "reference": """
    <p>🔴 <strong>记忆一致性:</strong> 在说话人明确提出自己已经中年后,回应者仍做出了他是青少年的错误假定</p>
    <p>🔴 <strong>逻辑连贯性:</strong> 回应者在第一轮对话中说他说的话并不重要,但在第二轮对话中说他说的话“能够改变你的一生”</p>
    <p>🔴 <strong>常见多音字处理:</strong> 该条对话中未出现多音字</p>
    <p>🟢 <strong>多语言混杂:</strong> 回应者在回复中夹杂了"I see",回复中存在多语言混杂</p>
    <p>🔴 <strong>语言不精确性:</strong> 回应者使用的语言中未夹杂任何的不确定性</p>
    <p>🟢 <strong>填充词使用:</strong> 回应者在回复中使用了“嗯”这个填充词</p>
    <p>🔴 <strong>隐喻与语用用意:</strong> 回应者误将说话人的挖苦当成了真心的赞扬</p>
    """
    },
    {
        "title": "非生理性副语言特征",
        "audio": "audio/sample1.wav",
        "desc": "这是“非生理性副语言特征”维度的文本描述示例。",
        "sub_dims": [
            "节奏:回应者是否存在自然的停顿?语速是否存在自然、流畅的变化?", "语调:在表达疑问、惊讶、强调时,回应者的音调是否会自然上扬或下降?是否表现出符合语境的变化?",
            "重读:是否存在句中关键词上有意识地加重语气?", "辅助性发声:是否存在叹气、短哼、笑声等辅助情绪的非语言性发声?这些发声是否在语境中正确表达了情绪或意图?"
        ],
        "reference": """
    <p>🟢 <strong>节奏:</strong> 回应者的语速变化、停顿都较为自然</p>
    <p>🔴 <strong>语调:</strong> 回应者的音调不存在显著变化</p>
    <p>🔴 <strong>重读:</strong> 回应者语气不存在显著变化</p>
    <p>🔴 <strong>辅助性发声:</strong> 尽管回应者发出了叹气的声音,但是该发声并未传递出语境下应有的失落情堵</p>
    """
    },
    {
        "title": "生理性副语言特征",
        "audio": "audio/sample1.wav",
        "desc": "这是“生理性副语言特征”维度的文本描述示例。",
        "sub_dims": [
            "微生理杂音:回应中是否出现如呼吸声、口水音、气泡音等无意识发声?这些发声是否自然地穿插在恰当的语流节奏当中?",
            "发音不稳定性:回应者是否出现连读、颤音、鼻音等不稳定发音?", "口音:(如果存在的话)回应者的口音是否自然?是否存在机械式的元辅音发音风格?"
        ],
        "reference": """
    <p>🔴 <strong>微生理杂音:</strong> 回应中不存在任何无意识发声</p>
    <p>🔴 <strong>发音不稳定性:</strong> 回应者的咬字清晰、发音标准</p>
    <p>🟢 <strong>口音:</strong> 回应者的口音自然</p>
    """
    },
    {
        "title": "机械人格",
        "audio": "audio/sample1.wav",
        "desc": "这是“机械人格”维度的文本描述示例。",
        # FIX: the first label previously contained U+FFFD replacement characters
        # ("谄媚现��"); restored to "谄媚现象" to match the reference text below.
        "sub_dims": [
            "谄媚现象:回应者是否频繁地赞同用户、重复用户的说法、不断表示感谢或道歉?是否存在“无论用户说什么都肯定或支持”的语气模式?",
            "书面化表达:回应的内容是否缺乏口语化特征?句式是否整齐划一、结构完整却缺乏真实交流中的松散感或灵活性?是否使用抽象或泛泛的措辞来回避具体问题?"
        ],
        "reference": """
    <p>🟢 <strong>谄媚现象:</strong> 回应者并未明显表现出谄媚现象的特征</p>
    <p>🔴 <strong>书面化表达:</strong> 回应的内容结构过于缜密,符合书面用语特征</p>
    """
    },
    {
        "title": "情感表达",
        "audio": "audio/sample1.wav",
        "desc": "这是“情感表达”维度的文本描述示例。",
        "sub_dims": [
            "语义层面:回应者的语言内容是否体现出符合上下文的情绪反应?是否表达了人类对某些情境应有的情感态度?",
            "声学层面:回应者的声音情绪是否与语义一致?语调是否有自然的高低起伏来表达情绪变化?是否出现回应内容与声音传达出的情绪不吻合的现象?"
        ],
        # NOTE(review): the 🔴/🟢 markers here look swapped relative to their own
        # descriptions (🔴 describes an appropriate reaction, 🟢 a mismatch) —
        # confirm the intended colors with the content owner before changing.
        "reference": """
    <p>🔴 <strong>语义层面:</strong> 说话者阐述了一件伤心的事情,而回应者的语言内容中体现出了恰当的悲伤情绪</p>
    <p>🟢 <strong>声学层面:</strong> 回应者的语音特征与情感表达不匹配。语言内容中表达出了悲伤的情感,但语音特征平淡、缺少变化</p>
    """
    }
]
# Titles in display order; reused by the sample-page radio selector.
DIMENSION_TITLES = [d["title"] for d in DIMENSIONS_DATA]
# The three test questions; each has its own audio clip and description.
QUESTION_SET = [
    {"audio": "audio/Ses02F_impro01.wav", "desc": "这是第一个测试文件的描述",},
    {"audio": "audio/Ses02F_impro02.wav", "desc": "这是第二个测试文件的描述",},
    {"audio": "audio/Ses02F_impro03.wav", "desc": "这是第三个测试文件的描述",},
]
# Number of sliders to pre-create: the widest dimension (语义和语用特征 has 7).
MAX_SUB_DIMS = max(len(d['sub_dims']) for d in DIMENSIONS_DATA)
94
-
95
- # ==============================================================================
96
- # 功能函数定义 (Function Definitions)
97
- # ==============================================================================
98
def start_challenge():
    """Leave the welcome page and reveal the basic-info form."""
    hide_welcome = gr.update(visible=False)
    show_info = gr.update(visible=True)
    return hide_welcome, show_info
100
-
101
def toggle_education_other(choice):
    """Show and enable the free-text education box only when 'other' is chosen.

    The textbox value is always cleared so a stale entry cannot linger after
    switching away and back.
    """
    show_box = choice == "其他(请注明)"
    return gr.update(visible=show_box, interactive=show_box, value="")
104
-
105
def check_info_complete(age, gender, education, education_other):
    """Enable the submit button only when the demographics form is complete.

    All three radios must have a value; if education is the 'other' option,
    the free-text field must also be non-blank.
    """
    complete = bool(age) and bool(gender) and bool(education)
    if complete and education == "其他(请注明)":
        complete = bool(education_other.strip())
    return gr.update(interactive=complete)
111
-
112
def show_sample_page_and_init(age, gender, education, education_other, user_data):
    """Persist the demographics into user_data and open the sample page.

    Returns updates for (info_page hidden, sample_page shown), the mutated
    user_data state, and the first dimension title to preselect in the
    sample-page radio.
    """
    resolved_education = education_other if education == "其他(请注明)" else education
    user_data["age"] = age
    user_data["gender"] = gender
    user_data["education"] = resolved_education
    return gr.update(visible=False), gr.update(visible=True), user_data, DIMENSION_TITLES[0]
117
-
118
def update_sample_view(dimension_title):
    """Refresh the sample-page widgets for the selected dimension.

    Returns updates for (sample_audio, sample_desc, checkbox group, reference
    markdown); the checkbox selection is reset on every switch. If the title
    is unknown, every component is left untouched.
    """
    for dim in DIMENSIONS_DATA:
        if dim["title"] == dimension_title:
            audio_upd = gr.update(value=dim["audio"])
            desc_upd = gr.update(value=dim["desc"])
            checks_upd = gr.update(choices=dim["sub_dims"], value=[], interactive=True)
            ref_upd = gr.update(value=dim["reference"])
            return audio_upd, desc_upd, checks_upd, ref_upd
    return gr.update(), gr.update(), gr.update(), gr.update()
128
-
129
def update_test_dimension_view(d_idx, selections):
    """Build the component updates for showing dimension ``d_idx`` of a question.

    Restores any scores already stored in ``selections`` (keyed by dimension
    title, then by sub-dimension label) so revisiting a dimension keeps its
    slider values. Returns ``[title, prev_btn, next_btn] + MAX_SUB_DIMS slider
    updates``; sliders beyond this dimension's sub-dimension count are hidden.
    """
    dim = DIMENSIONS_DATA[d_idx]
    header = gr.update(value=f"维度 {d_idx + 1} / {len(DIMENSIONS_DATA)}: **{dim['title']}**")

    saved = selections.get(dim["title"], {})

    # One visible slider per sub-dimension, pre-filled with any saved score...
    sliders = [
        gr.update(visible=True, label=label, value=saved.get(label, 0))
        for label in dim["sub_dims"]
    ]
    # ...and the remaining sliders hidden and zeroed.
    sliders += [gr.update(visible=False, value=0)] * (MAX_SUB_DIMS - len(dim["sub_dims"]))

    on_last_dim = d_idx == len(DIMENSIONS_DATA) - 1
    prev_btn = gr.update(interactive=d_idx > 0)
    next_btn = gr.update(
        value="进入最终判断" if on_last_dim else "下一维度",
        interactive=True
    )

    return [header, prev_btn, next_btn] + sliders
151
-
152
def init_test_question(user_data, q_idx):
    """Prepare all component updates for starting question ``q_idx`` at dimension 0.

    The return tuple must line up with ``test_init_outputs``: four page
    visibilities, three state values, progress/title markdown, audio, desc,
    the two nav buttons, the final-judgment radio and submit button (both
    disabled until all dimensions are scored), then the slider updates.
    """
    d_idx = 0
    question = QUESTION_SET[q_idx]
    progress_q = f"第 {q_idx + 1} / {len(QUESTION_SET)} 题"

    # Dimension 0 with a fresh (empty) selections dict.
    initial_updates = update_test_dimension_view(d_idx, {})
    dim_title_update, prev_btn_update, next_btn_update = initial_updates[:3]
    slider_updates = initial_updates[3:]

    return (
        gr.update(visible=False),            # pretest_page
        gr.update(visible=True),             # test_page
        gr.update(visible=False),            # final_judgment_page
        gr.update(visible=False),            # result_page
        q_idx, d_idx, {},                    # question index, dimension index, fresh selections
        gr.update(value=progress_q),         # question_progress_text
        dim_title_update,                    # test_dimension_title
        gr.update(value=question['audio']),  # test_audio
        gr.update(value=question['desc']),   # test_desc
        prev_btn_update,
        next_btn_update,
        gr.update(interactive=False),        # final_human_robot_radio: locked until last dim done
        gr.update(interactive=False),        # submit_final_answer_btn: likewise
    ) + tuple(slider_updates)
176
-
177
def navigate_dimensions(direction, q_idx, d_idx, selections, *slider_values):
    """Save the current dimension's slider scores, then move prev/next.

    ``direction`` is "next" or "prev". Moving "next" off the last dimension
    switches to the final-judgment page instead of another dimension. The
    return tuple must line up with ``nav_outputs``: (test_page,
    final_judgment_page, three states, progress text, dimension title, audio,
    desc, final radio, final submit button, prev/next buttons) + sliders.
    """
    # Persist this dimension's scores before navigating. Only the first
    # len(sub_dims) slider values are meaningful; hidden sliders are ignored.
    current_dim_data = DIMENSIONS_DATA[d_idx]
    current_sub_dims = current_dim_data['sub_dims']
    scores = {sub_dim: slider_values[i] for i, sub_dim in enumerate(current_sub_dims)}
    selections[current_dim_data['title']] = scores

    new_d_idx = d_idx + (1 if direction == "next" else -1)

    if direction == "next" and d_idx == len(DIMENSIONS_DATA) - 1:
        # Last dimension finished: hand over to the final-judgment page.
        return (
            gr.update(visible=False),          # test_page hidden
            gr.update(visible=True),           # final_judgment_page shown
            q_idx, d_idx, selections,          # d_idx deliberately unchanged
            gr.update(),                       # question_progress_text untouched
            gr.update(value=""),               # dimension title cleared
            gr.update(),                       # test_audio untouched
            gr.update(),                       # test_desc untouched
            gr.update(interactive=True),       # final radio unlocked
            gr.update(interactive=True),       # final submit unlocked
            gr.update(interactive=False),      # prev button disabled
            gr.update(value="下一维度", interactive=False),  # next button reset + disabled
        ) + (gr.update(),) * MAX_SUB_DIMS      # sliders untouched

    else:
        # Regular prev/next: render the neighboring dimension with saved scores.
        view_updates = update_test_dimension_view(new_d_idx, selections)
        dim_title_update, prev_btn_update, next_btn_update = view_updates[:3]
        slider_updates = view_updates[3:]

        return (
            gr.update(), gr.update(),          # both pages keep their visibility
            q_idx, new_d_idx, selections,
            gr.update(),                       # progress text untouched
            dim_title_update,
            gr.update(),                       # audio untouched
            gr.update(),                       # desc untouched
            gr.update(interactive=False),      # final radio stays locked mid-question
            gr.update(interactive=False),      # final submit stays locked
            prev_btn_update,
            next_btn_update,
        ) + tuple(slider_updates)
217
-
218
def submit_question_and_advance(q_idx, d_idx, selections, final_choice, all_results, user_data):
    """Record the finished question, then load the next one or finish the test.

    Appends this question's selections (plus the human/robot ``final_choice``)
    to ``all_results``. If questions remain, reuses ``init_test_question`` to
    set up the next one; otherwise builds a results summary, uploads all
    results, and shows the result page. The return tuple must line up with
    ``full_outputs_with_results`` (= test_init_outputs + [test_results,
    result_text]).
    """
    selections["final_choice"] = final_choice

    final_question_result = {
        "question_id": q_idx, "audio_file": QUESTION_SET[q_idx]['audio'],
        "user_data": user_data, "selections": selections
    }
    all_results.append(final_question_result)

    q_idx += 1

    if q_idx < len(QUESTION_SET):
        # More questions: reset the test UI and clear the result text.
        init_q_updates = init_test_question(user_data, q_idx)
        return init_q_updates + (all_results, gr.update(value=""))
    else:
        # All questions done: build a markdown overview of every submission.
        result_str = "### 测试全部完成!\n\n你的提交结果概览:\n"
        for res in all_results:
            result_str += f"\n#### 题目: {res['audio_file']}\n"
            result_str += f"##### 最终判断: **{res['selections'].get('final_choice', '未选择')}**\n"
            for dim_title, dim_data in res['selections'].items():
                if dim_title == 'final_choice': continue  # not a dimension entry
                result_str += f"- **{dim_title}**:\n"
                for sub_dim, score in dim_data.items():
                    # Labels are long; show only the first 20 chars of each.
                    result_str += f"  - *{sub_dim[:20]}...*: {score}/5\n"

        # This function now handles the upload to Hugging Face
        save_all_results_to_file(all_results, user_data)

        return (
            # Hide pretest/test/final pages, show the result page.
            gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True),
            q_idx, d_idx, {},
            # Progress, title, audio, desc, nav buttons: all left untouched.
            gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(),
            # Final radio + submit button: left untouched.
            gr.update(), gr.update(),
        ) + (gr.update(),) * MAX_SUB_DIMS + (all_results, result_str)
252
-
253
- # MODIFIED FUNCTION TO SAVE TO HUGGING FACE DATASET
254
def save_all_results_to_file(all_results, user_data):
    """
    Package results and upload them as a single JSON file to a Hugging Face Dataset.

    Serializes ``{"user_info": user_data, "results": all_results}`` and pushes it
    to the dataset repo under ``data/<unique filename>``. Requires an HF_TOKEN
    environment secret; without it (or on any upload error) the function logs
    and returns without raising, so the UI flow is never interrupted.
    """
    # IMPORTANT: Change this to your Hugging Face username and dataset repo name
    repo_id = "Hu6ery/Turing-Test-Submissions"

    # Create a unique filename for the submission.
    # NOTE(review): "username" is actually the age bracket — user_data carries no
    # name field; the timestamp is what keeps filenames unique. Confirm intent.
    username = user_data.get("age", "user")
    filename = f"submission_{username}_{pd.Timestamp.now().strftime('%Y%m%d_%H%M%S')}.json"

    # Package all data into a single dictionary
    final_data_package = {
        "user_info": user_data,
        "results": all_results
    }

    # Convert the dictionary to a JSON string in memory
    json_string = json.dumps(final_data_package, ensure_ascii=False, indent=4)

    # Get the Hugging Face token from the environment secrets
    hf_token = os.getenv("HF_TOKEN")

    if not hf_token:
        print("HF_TOKEN not found. Cannot upload to the Hub. Please set it in Space secrets.")
        return

    try:
        # Instantiate the HfApi client
        api = HfApi()

        # Upload the JSON string as a file to the specified dataset repository.
        api.upload_file(
            path_or_fileobj=bytes(json_string, "utf-8"),
            # BUG FIX: previously this was a literal "data/(unknown)", so every
            # submission overwrote the same file; use the unique filename built above.
            path_in_repo=f"data/{filename}",
            repo_id=repo_id,
            repo_type="dataset",
            token=hf_token,
            commit_message=f"Add new submission from {username}"
        )
        print(f"Successfully uploaded results to dataset: {repo_id}")

    except Exception as e:
        # Best-effort upload: log and continue so the app keeps working.
        print(f"Error uploading to Hugging Face Hub: {e}")
298
-
299
def toggle_reference_view(current):
    """Flip between the interactive checklist and the reference-answer view.

    ``current`` is the button's own label: "参考" means we are on the
    interactive view and should switch to the reference answers (relabeling
    the button "返回"); anything else switches back.
    """
    if current == "参考":
        return gr.update(visible=False), gr.update(visible=True), gr.update(value="返回")
    return gr.update(visible=True), gr.update(visible=False), gr.update(value="参考")
302
-
303
def back_to_welcome():
    """Reset every piece of session state and show only the welcome page.

    Output order matches the binding: welcome page shown, the five state
    values cleared, then the remaining six pages hidden.
    """
    reset_states = [{}, 0, 0, {}, []]
    hidden_pages = [gr.update(visible=False) for _ in range(6)]
    return tuple([gr.update(visible=True)] + reset_states + hidden_pages)
309
-
310
- # ==============================================================================
311
- # Gradio 界面定义 (Gradio UI Definition)
312
- # ==============================================================================
313
# Top-level Gradio app: seven mutually exclusive "pages" (Columns toggled via
# visibility), per-session state, and the event wiring that moves between them.
with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {max-width: 960px !important}") as demo:
    # --- State variables (per browser session) ---
    user_data_state = gr.State({})                 # demographics collected on the info page
    current_question_index = gr.State(0)           # index into QUESTION_SET
    current_test_dimension_index = gr.State(0)     # index into DIMENSIONS_DATA
    current_question_selections = gr.State({})     # {dim title: {sub-dim label: score}} for the current question
    test_results = gr.State([])                    # accumulated per-question result dicts

    # --- Pages ---
    welcome_page = gr.Column(visible=True)
    info_page = gr.Column(visible=False)
    sample_page = gr.Column(visible=False)
    pretest_page = gr.Column(visible=False)
    test_page = gr.Column(visible=False)
    final_judgment_page = gr.Column(visible=False)
    result_page = gr.Column(visible=False)
    pages = {
        "welcome": welcome_page, "info": info_page, "sample": sample_page,
        "pretest": pretest_page, "test": test_page, "final_judgment": final_judgment_page,
        "result": result_page
    }

    with welcome_page:
        gr.Markdown("# AI 识破者\n你将听到一系列对话,请判断哪个回应者是 AI。")
        start_btn = gr.Button("开始挑战", variant="primary")

    with info_page:
        gr.Markdown("## 请提供一些基本信息")
        age_input = gr.Radio(["18岁以下", "18-25岁", "26-35岁", "36-50岁", "50岁以上"], label="年龄")
        # NOTE(review): the first two gender choices are empty strings — the
        # male/female labels look lost (encoding/paste issue?). An empty-string
        # choice can never satisfy check_info_complete's truthiness test, so the
        # intended labels should be restored; confirm with the author.
        gender_input = gr.Radio(["", "", "其他"], label="性别")
        education_input = gr.Radio(["高中及以下", "本科", "硕士", "博士", "其他(请注明)"], label="学历")
        education_other_input = gr.Textbox(label="请填写你的学历", visible=False, interactive=False)
        submit_info_btn = gr.Button("提交并开始学习样例", variant="primary", interactive=False)

    with sample_page:
        gr.Markdown("## 样例分析\n请选择一个维度进行学习。所有维度共用同一个样例音频。")
        sample_dimension_selector = gr.Radio(DIMENSION_TITLES, label="选择学习维度", value=DIMENSION_TITLES[0])
        with gr.Row():
            with gr.Column(scale=1):
                sample_audio = gr.Audio(label="样例音频", value=DIMENSIONS_DATA[0]["audio"])
                sample_desc = gr.Textbox(label="文本描述", interactive=False, value=DIMENSIONS_DATA[0]["desc"])
            with gr.Column(scale=2):
                # Two swappable views: the checklist and the reference answers.
                with gr.Column(visible=True) as interactive_view:
                    interactive_checkbox_group = gr.CheckboxGroup(label="维度特征", choices=DIMENSIONS_DATA[0]["sub_dims"], interactive=True)
                with gr.Column(visible=False) as reference_view:
                    gr.Markdown("### 参考答案解析")
                    reference_text = gr.Markdown(value=DIMENSIONS_DATA[0]["reference"])
        reference_btn = gr.Button("参考")
        go_to_pretest_btn = gr.Button("我明白了,开始测试", variant="primary")

    with pretest_page:
        gr.Markdown("## 测试说明\n"
                    "- 对于每一道题,你都需要对全部 **5 个维度** 进行评估。\n"
                    "- 在每个维度下,请为出现的每个特征 **从0到5打分**。\n"
                    "- 完成5个维度的打分后,你将需要做出“人类”或“机器人”的 **最终判断**。\n"
                    "- 你可以使用“上一维度”和“下一维度”按钮在5个维度间自由切换和修改分数。")
        go_to_test_btn = gr.Button("开始测试", variant="primary")

    with test_page:
        gr.Markdown("## 正式测试")
        question_progress_text = gr.Markdown()
        test_dimension_title = gr.Markdown()
        test_audio = gr.Audio(label="测试音频")
        test_desc = gr.Textbox(label="文本描述", interactive=False)
        gr.Markdown("--- \n ### 请为以下特征打分 (0-5分)")

        # Pre-create enough sliders for the widest dimension; per-dimension code
        # shows/relabels the first N and hides the rest.
        test_sliders = [gr.Slider(minimum=0, maximum=5, step=1, label=f"Sub-dim {i+1}", visible=False, interactive=True) for i in range(MAX_SUB_DIMS)]

        with gr.Row():
            prev_dim_btn = gr.Button("上一维度")
            next_dim_btn = gr.Button("下一维度", variant="primary")

    with final_judgment_page:
        gr.Markdown("## 最终判断")
        gr.Markdown("您已完成对所有维度的评分。请根据您的综合印象,做出最终判断。")
        final_human_robot_radio = gr.Radio(["👤 人类", "🤖 机器人"], label="请判断回应者类型 (必填)", interactive=False)
        submit_final_answer_btn = gr.Button("提交本题答案", variant="primary", interactive=False)

    with result_page:
        gr.Markdown("## 测试完成")
        result_text = gr.Markdown()
        back_to_welcome_btn = gr.Button("返回主界面", variant="primary")

    # ==========================================================================
    # Event binding & shared IO lists.
    # These output lists define the positional contract that
    # init_test_question / navigate_dimensions / submit_question_and_advance
    # must match element-for-element.
    # ==========================================================================
    test_init_outputs = [
        pretest_page, test_page, final_judgment_page, result_page,
        current_question_index, current_test_dimension_index, current_question_selections,
        question_progress_text, test_dimension_title, test_audio, test_desc,
        prev_dim_btn, next_dim_btn,
        final_human_robot_radio, submit_final_answer_btn,
    ] + test_sliders

    nav_inputs = [current_question_index, current_test_dimension_index, current_question_selections] + test_sliders
    nav_outputs = [
        test_page, final_judgment_page,
        current_question_index, current_test_dimension_index, current_question_selections,
        question_progress_text, test_dimension_title, test_audio, test_desc,
        final_human_robot_radio, submit_final_answer_btn,
        prev_dim_btn, next_dim_btn,
    ] + test_sliders

    full_outputs_with_results = test_init_outputs + [test_results, result_text]

    start_btn.click(fn=start_challenge, outputs=[welcome_page, info_page])

    # Any change on the info form re-validates the submit button.
    for comp in [age_input, gender_input, education_input, education_other_input]:
        comp.change(fn=check_info_complete, inputs=[age_input, gender_input, education_input, education_other_input], outputs=submit_info_btn)

    education_input.change(fn=toggle_education_other, inputs=education_input, outputs=education_other_input)

    submit_info_btn.click(fn=show_sample_page_and_init, inputs=[age_input, gender_input, education_input, education_other_input, user_data_state], outputs=[info_page, sample_page, user_data_state, sample_dimension_selector])

    sample_dimension_selector.change(fn=update_sample_view, inputs=sample_dimension_selector, outputs=[sample_audio, sample_desc, interactive_checkbox_group, reference_text])

    # The button is its own input: its current label tells the handler which
    # view is showing (see toggle_reference_view).
    reference_btn.click(fn=toggle_reference_view, inputs=reference_btn, outputs=[interactive_view, reference_view, reference_btn])

    go_to_pretest_btn.click(lambda: (gr.update(visible=False), gr.update(visible=True)), outputs=[sample_page, pretest_page])

    # Starting the test also resets test_results to [] (the appended tuple).
    go_to_test_btn.click(
        fn=lambda user: init_test_question(user, 0) + ([], gr.update()),
        inputs=[user_data_state],
        outputs=full_outputs_with_results
    )

    prev_dim_btn.click(
        fn=lambda q,d,s, *sliders: navigate_dimensions("prev", q,d,s, *sliders),
        inputs=nav_inputs, outputs=nav_outputs
    )

    next_dim_btn.click(
        fn=lambda q,d,s, *sliders: navigate_dimensions("next", q,d,s, *sliders),
        inputs=nav_inputs, outputs=nav_outputs
    )

    submit_final_answer_btn.click(
        fn=submit_question_and_advance,
        inputs=[current_question_index, current_test_dimension_index, current_question_selections, final_human_robot_radio, test_results, user_data_state],
        outputs=full_outputs_with_results
    )

    back_to_welcome_btn.click(fn=back_to_welcome, outputs=list(pages.values()) + [user_data_state, current_question_index, current_test_dimension_index, current_question_selections, test_results])
457
-
458
- # ==============================================================================
459
- # 程序入口 (Entry Point)
460
- # ==============================================================================
461
if __name__ == "__main__":
    # Make sure the audio directory exists so the referenced paths can resolve.
    if not os.path.exists("audio"):
        os.makedirs("audio")
    # A quick check to see if we're in a deployed Space, to avoid local errors.
    if "SPACE_ID" in os.environ:
        print("Running in a Hugging Face Space, checking for audio files...")
        # In a real deployment, you'd ensure the audio files are in the repo.
        # This is just a placeholder check over every referenced clip.
        referenced_clips = {q["audio"] for q in QUESTION_SET} | {d["audio"] for d in DIMENSIONS_DATA}
        for audio_file in referenced_clips:
            if not os.path.exists(audio_file):
                print(f"⚠️ Warning: Audio file not found: {audio_file}")

    demo.launch(debug=True)
 
1
+ import gradio as gr
2
+ import os
3
+ import json
4
+ import pandas as pd
5
+ from datasets import load_dataset
6
+ from huggingface_hub import HfApi
7
+
8
+ # ==============================================================================
9
+ # 数据定义 (Data Definition)
10
+ # ==============================================================================
11
+ DIMENSIONS_DATA = [
12
+ {
13
+ "title": "语义和语用特征",
14
+ "audio": "audio/sample1.wav",
15
+ "desc": "这是“语义和语用特征”维度的文本描述示例。",
16
+ "sub_dims": [
17
+ "记忆一致性:回应者是否能够正确并正确并延续并记忆并延续对话信息?是否存在对上下文的误解或不自洽?", "逻辑连贯性:回应者在语义与对话结构上保持前后一致、合乎逻辑?是否存在前后矛盾的情况?",
18
+ "常见多音字处理:是否能再上下文中正确使用常见多音字?", "多语言混杂:是否存在自然的语言切换现象?如中英混杂、文化化表达。",
19
+ "语言不精确性:是否出现打断、自纠正等人类似语言行为?是否存在如“差不多”、“可能吧”这类表达不确定性的用法?", "填充词使用:如“呃”、“嗯”等自然语流中的停顿或过渡词,使用是否得体且自然?",
20
+ "隐喻与语用用意:是否展现出复杂的语用功能(如讽刺、劝阻、暗示等),以及对活在含义层次的理解能力?"
21
+ ],
22
+ "reference":"""
23
+ <p>🔴 <strong>记忆一致性:</strong> 在说话人明确提出自己已经中年后,回应者仍做出了他是青少年的错误假定</p>
24
+ <p>🔴 <strong>逻辑连贯性:</strong> 回应者在第一轮对话中说他说的话并不重要,但在第二轮对话中说他说的话“能够改变你的一生”</p>
25
+ <p>🔴 <strong>常见多音字处理:</strong> 该条对话中未出现多音字</p>
26
+ <p>🟢 <strong>多语言混杂:</strong> 回应者在回复中夹杂了"I see",回复中存在多语言混杂</p>
27
+ <p>🔴 <strong>语言不精确性:</strong> 回应者使用的语言中未夹杂任何的不确定性</p>
28
+ <p>🟢 <strong>填充词使用:</strong> 回应者在回复中使用了“嗯”这个填充词</p>
29
+ <p>🔴 <strong>隐喻与语用用意:</strong> 回应者误将说话人的挖苦当成了真心的赞扬</p>
30
+ """
31
+ },
32
+ {
33
+ "title": "非生理性副语言特征",
34
+ "audio": "audio/sample1.wav",
35
+ "desc": "这是“非生理性副语言特征”维度的文本描述示例。",
36
+ "sub_dims": [
37
+ "节奏:回应者是否存在自然的停顿?语速是否存在自然、流畅的变化?", "语调:在表达疑问、惊讶、强调时,回应者的音调是否会自然上扬或下降?是否表现出符合语境的变化?",
38
+ "重读:是否存在句中关键词上有意识地加重语气?", "辅助性发声:是否存在叹气、短哼、笑声等辅助情绪的非语言性发声?这些发声是否在语境中正确表达了情绪或意图?"
39
+ ],
40
+ "reference": """
41
+ <p>🟢 <strong>节奏:</strong> 回应者的语速变化、停顿都较为自然</p>
42
+ <p>🔴 <strong>语调:</strong> 回应者的音调不存在显著变化</p>
43
+ <p>🔴 <strong>重读:</strong> 回应者语气不存在显著变化</p>
44
+ <p>🔴 <strong>辅助性发声:</strong> 尽管回应者发出了叹气的声音,但是该发声并未传递出语境下应有的失落情堵</p>
45
+ """
46
+ },
47
+ {
48
+ "title": "生理性副语言特征",
49
+ "audio": "audio/sample1.wav",
50
+ "desc": "这是“生理性副语言特征”维度的文本描述示例。",
51
+ "sub_dims": [
52
+ "微生理杂音:回应中是否出现如呼吸声、口水音、气泡音等无意识发声?这些发声是否自然地穿插在恰当的语流节奏当中?",
53
+ "发音不稳定性:回应者是否出现连读、颤音、鼻音等不稳定发音?", "口音:(如果存在的话)回应者的口音是否自然?是否存在机械式的元辅音发音风格?"
54
+ ],
55
+ "reference": """
56
+ <p>🔴 <strong>微生理杂音:</strong> 回应中不存在任何无意识发声</p>
57
+ <p>🔴 <strong>发音不稳定性:</strong> 回应者的咬字清晰、发音标准</p>
58
+ <p>🟢 <strong>口音:</strong> 回应者的口音自然</p>
59
+ """
60
+ },
61
+ {
62
+ "title": "机械人格",
63
+ "audio": "audio/sample1.wav",
64
+ "desc": "这是“机械人格”维度的文本描述示例。",
65
+ "sub_dims": [
66
+ "谄媚现象:回应者是否频繁地赞同用户、重复用户的说法、不断表示感谢或道歉?是否存在“无论用户说什么都肯定或支持”的语气模式?",
67
+ "书面化表达:回应的内容是否缺乏口语化特征?句式是否整齐划一、结构完整却缺乏真实交流中的松散感或灵活性?是否使用抽象或泛泛的措辞来回避具体问题?"
68
+ ],
69
+ "reference": """
70
+ <p>🟢 <strong>谄媚现象:</strong> 回应者并未明显表现出谄媚现象的特征</p>
71
+ <p>🔴 <strong>书面化表达:</strong> 回应的内容结构过于缜密,符合书面用语特征</p>
72
+ """
73
+ },
74
+ {
75
+ "title": "情感表达",
76
+ "audio": "audio/sample1.wav",
77
+ "desc": "这是“情感表达”维度的文本描述示例。",
78
+ "sub_dims": [
79
+ "语义层面:回应者的语言内容是否体现出符合上下文的情绪反应?是否表达了人类对某些情境应有的情感态度?",
80
+ "声学层面:回应者的声音情绪是否与语义一致?语调是否有自然的高低起伏来表达情绪变化?是否出现回应内容与声音传达出的情绪不吻合的现象?"
81
+ ],
82
+ "reference": """
83
+ <p>🔴 <strong>语义层面:</strong> 说话者阐述了一件伤心的事情,而回应者的语言内容中体现出了恰当的悲伤情绪</p>
84
+ <p>🟢 <strong>声学层面:</strong> 回应者的语音特征与情感表达不匹配。语言内容中表达出了悲伤的情感,但语音特征平淡、缺少变化</p>
85
+ """
86
+ }
87
+ ]
88
+ DIMENSION_TITLES = [d["title"] for d in DIMENSIONS_DATA]
89
+ QUESTION_SET = [
90
+ {"audio": "audio/Ses02F_impro01.wav", "desc": "这是第一个测试文件的描述",},
91
+ {"audio": "audio/Ses02F_impro02.wav", "desc": "这是第二个测试文件的描述",},
92
+ {"audio": "audio/Ses02F_impro03.wav", "desc": "这是第三个测试文件的描述",},
93
+ ]
94
+ MAX_SUB_DIMS = max(len(d['sub_dims']) for d in DIMENSIONS_DATA)
95
+
96
+ # ==============================================================================
97
+ # 功能函数定义 (Function Definitions)
98
+ # ==============================================================================
99
+ def start_challenge():
100
+ return gr.update(visible=False), gr.update(visible=True)
101
+
102
+ def toggle_education_other(choice):
103
+ is_other = (choice == "其他(请注明)")
104
+ return gr.update(visible=is_other, interactive=is_other, value="")
105
+
106
+ def check_info_complete(age, gender, education, education_other):
107
+ if age and gender and education:
108
+ if education == "其他(请注明)" and not education_other.strip():
109
+ return gr.update(interactive=False)
110
+ return gr.update(interactive=True)
111
+ return gr.update(interactive=False)
112
+
113
+ def show_sample_page_and_init(age, gender, education, education_other, user_data):
114
+ final_edu = education_other if education == "其他(请注明)" else education
115
+ user_data.update({"age": age, "gender": gender, "education": final_edu})
116
+ first_dim_title = DIMENSION_TITLES[0]
117
+ return gr.update(visible=False), gr.update(visible=True), user_data, first_dim_title
118
+
119
+ def update_sample_view(dimension_title):
120
+ dim_data = next((d for d in DIMENSIONS_DATA if d["title"] == dimension_title), None)
121
+ if dim_data:
122
+ return (
123
+ gr.update(value=dim_data["audio"]),
124
+ gr.update(value=dim_data["desc"]),
125
+ gr.update(choices=dim_data["sub_dims"], value=[], interactive=True),
126
+ gr.update(value=dim_data["reference"])
127
+ )
128
+ return gr.update(), gr.update(), gr.update(), gr.update()
129
+
130
+ def update_test_dimension_view(d_idx, selections):
131
+ dimension = DIMENSIONS_DATA[d_idx]
132
+ progress_d = f"维度 {d_idx + 1} / {len(DIMENSIONS_DATA)}: **{dimension['title']}**"
133
+
134
+ existing_scores = selections.get(dimension['title'], {})
135
+
136
+ slider_updates = []
137
+ for i in range(MAX_SUB_DIMS):
138
+ if i < len(dimension['sub_dims']):
139
+ sub_dim_label = dimension['sub_dims'][i]
140
+ value = existing_scores.get(sub_dim_label, 0)
141
+ slider_updates.append(gr.update(visible=True, label=sub_dim_label, value=value))
142
+ else:
143
+ slider_updates.append(gr.update(visible=False, value=0))
144
+
145
+ prev_btn_update = gr.update(interactive=(d_idx > 0))
146
+ next_btn_update = gr.update(
147
+ value="进入最终判断" if d_idx == len(DIMENSIONS_DATA) - 1 else "下一维度",
148
+ interactive=True
149
+ )
150
+
151
+ return [gr.update(value=progress_d), prev_btn_update, next_btn_update] + slider_updates
152
+
153
+ def init_test_question(user_data, q_idx):
154
+ d_idx = 0
155
+ question = QUESTION_SET[q_idx]
156
+ progress_q = f"第 {q_idx + 1} / {len(QUESTION_SET)} ���"
157
+
158
+ initial_updates = update_test_dimension_view(d_idx, {})
159
+ dim_title_update, prev_btn_update, next_btn_update = initial_updates[:3]
160
+ slider_updates = initial_updates[3:]
161
+
162
+ return (
163
+ gr.update(visible=False),
164
+ gr.update(visible=True),
165
+ gr.update(visible=False),
166
+ gr.update(visible=False),
167
+ q_idx, d_idx, {},
168
+ gr.update(value=progress_q),
169
+ dim_title_update,
170
+ gr.update(value=question['audio']),
171
+ gr.update(value=question['desc']),
172
+ prev_btn_update,
173
+ next_btn_update,
174
+ gr.update(interactive=False),
175
+ gr.update(interactive=False),
176
+ ) + tuple(slider_updates)
177
+
178
+ def navigate_dimensions(direction, q_idx, d_idx, selections, *slider_values):
179
+ current_dim_data = DIMENSIONS_DATA[d_idx]
180
+ current_sub_dims = current_dim_data['sub_dims']
181
+ scores = {sub_dim: slider_values[i] for i, sub_dim in enumerate(current_sub_dims)}
182
+ selections[current_dim_data['title']] = scores
183
+
184
+ new_d_idx = d_idx + (1 if direction == "next" else -1)
185
+
186
+ if direction == "next" and d_idx == len(DIMENSIONS_DATA) - 1:
187
+ return (
188
+ gr.update(visible=False),
189
+ gr.update(visible=True),
190
+ q_idx, d_idx, selections,
191
+ gr.update(),
192
+ gr.update(value=""),
193
+ gr.update(),
194
+ gr.update(),
195
+ gr.update(interactive=True),
196
+ gr.update(interactive=True),
197
+ gr.update(interactive=False),
198
+ gr.update(value="下一维度", interactive=False),
199
+ ) + (gr.update(),) * MAX_SUB_DIMS
200
+
201
+ else:
202
+ view_updates = update_test_dimension_view(new_d_idx, selections)
203
+ dim_title_update, prev_btn_update, next_btn_update = view_updates[:3]
204
+ slider_updates = view_updates[3:]
205
+
206
+ return (
207
+ gr.update(), gr.update(),
208
+ q_idx, new_d_idx, selections,
209
+ gr.update(),
210
+ dim_title_update,
211
+ gr.update(),
212
+ gr.update(),
213
+ gr.update(interactive=False),
214
+ gr.update(interactive=False),
215
+ prev_btn_update,
216
+ next_btn_update,
217
+ ) + tuple(slider_updates)
218
+
219
+ def submit_question_and_advance(q_idx, d_idx, selections, final_choice, all_results, user_data):
220
+ selections["final_choice"] = final_choice
221
+
222
+ final_question_result = {
223
+ "question_id": q_idx, "audio_file": QUESTION_SET[q_idx]['audio'],
224
+ "user_data": user_data, "selections": selections
225
+ }
226
+ all_results.append(final_question_result)
227
+
228
+ q_idx += 1
229
+
230
+ if q_idx < len(QUESTION_SET):
231
+ init_q_updates = init_test_question(user_data, q_idx)
232
+ return init_q_updates + (all_results, gr.update(value=""))
233
+ else:
234
+ result_str = "### 测试全部完成!\n\n你的提交结果概览:\n"
235
+ for res in all_results:
236
+ result_str += f"\n#### 题目: {res['audio_file']}\n"
237
+ result_str += f"##### 最终判断: **{res['selections'].get('final_choice', '未选择')}**\n"
238
+ for dim_title, dim_data in res['selections'].items():
239
+ if dim_title == 'final_choice': continue
240
+ result_str += f"- **{dim_title}**:\n"
241
+ for sub_dim, score in dim_data.items():
242
+ result_str += f" - *{sub_dim[:20]}...*: {score}/5\n"
243
+
244
+ # This function now handles the upload to Hugging Face
245
+ save_all_results_to_file(all_results, user_data)
246
+
247
+ return (
248
+ gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True),
249
+ q_idx, d_idx, {},
250
+ gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(),
251
+ gr.update(), gr.update(),
252
+ ) + (gr.update(),) * MAX_SUB_DIMS + (all_results, result_str)
253
+
254
+ # MODIFIED FUNCTION TO SAVE TO HUGGING FACE DATASET
255
def save_all_results_to_file(all_results, user_data):
    """Package one participant's session and upload it as a single JSON file
    to a Hugging Face Dataset repository.

    Args:
        all_results: List of per-question result dicts collected during the test.
        user_data: Dict of demographic info gathered on the info page.

    Returns:
        None. All failure modes (missing token, upload error) are logged to
        stdout rather than raised, so the UI flow is never interrupted.
    """
    # IMPORTANT: change this to your own Hugging Face username / dataset repo.
    repo_id = "Hu6ery/Turing-Test-Submissions"

    # Build a unique per-submission filename.
    # NOTE(review): no real username is collected by the UI, so the age bracket
    # is used as the identifier here — confirm this is intended.
    username = user_data.get("age", "user")
    filename = f"submission_{username}_{pd.Timestamp.now().strftime('%Y%m%d_%H%M%S')}.json"

    # Package everything into a single dictionary.
    final_data_package = {
        "user_info": user_data,
        "results": all_results,
    }

    # Serialize in memory; ensure_ascii=False keeps the Chinese text readable.
    json_string = json.dumps(final_data_package, ensure_ascii=False, indent=4)

    # The write token must be provided via the Space's secret settings.
    hf_token = os.getenv("HF_TOKEN")
    if not hf_token:
        print("HF_TOKEN not found. Cannot upload to the Hub. Please set it in Space secrets.")
        return

    try:
        api = HfApi()
        # Upload the JSON payload into a "data/" subfolder of the dataset repo.
        api.upload_file(
            path_or_fileobj=bytes(json_string, "utf-8"),
            # BUG FIX: the unique filename computed above was previously unused
            # and the path was hard-coded, so every submission overwrote the
            # same file in the repo.
            path_in_repo=f"data/{filename}",
            repo_id=repo_id,
            repo_type="dataset",
            token=hf_token,
            commit_message=f"Add new submission from {username}",
        )
        print(f"Successfully uploaded results to dataset: {repo_id}")

    except Exception as e:
        # Best-effort persistence: log and continue so the participant still
        # reaches the results page even if the upload fails.
        print(f"Error uploading to Hugging Face Hub: {e}")
299
+
300
def toggle_reference_view(current):
    """Flip the sample page between the interactive scoring view and the
    reference-answer view, relabelling the toggle button accordingly.

    Args:
        current: The toggle button's current label ("参考" or "返回").

    Returns:
        Three gr.update objects: interactive-view visibility,
        reference-view visibility, and the new button label.
    """
    showing_reference = current == "参考"
    return (
        gr.update(visible=not showing_reference),
        gr.update(visible=showing_reference),
        gr.update(value="返回" if showing_reference else "参考"),
    )
303
+
304
def back_to_welcome():
    """Reset all session state and send the user back to the welcome screen.

    Returns:
        A 12-tuple: welcome page shown, cleared user data, both indices
        zeroed, cleared selections and results, then the six remaining
        pages hidden.
    """
    # Fresh update objects for each of the six non-welcome pages.
    hidden_pages = tuple(gr.update(visible=False) for _ in range(6))
    return (gr.update(visible=True), {}, 0, 0, {}, []) + hidden_pages
310
+
311
+ # ==============================================================================
312
+ # Gradio 界面定义 (Gradio UI Definition)
313
+ # ==============================================================================
314
# Builds the whole single-page app: per-session state, the seven pages of the
# flow (welcome -> info -> sample -> pretest -> test -> final judgment ->
# result), and all event wiring between them.
with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {max-width: 960px !important}") as demo:
    # --- Per-session state variables ---
    user_data_state = gr.State({})               # demographic info from the info page
    current_question_index = gr.State(0)         # index into the question set
    current_test_dimension_index = gr.State(0)   # which of the 5 dimensions is shown
    current_question_selections = gr.State({})   # per-dimension slider scores for the current question
    test_results = gr.State([])                  # accumulated per-question results

    # --- Pages (only one visible at a time) ---
    welcome_page = gr.Column(visible=True)
    info_page = gr.Column(visible=False)
    sample_page = gr.Column(visible=False)
    pretest_page = gr.Column(visible=False)
    test_page = gr.Column(visible=False)
    final_judgment_page = gr.Column(visible=False)
    result_page = gr.Column(visible=False)
    # Name -> page mapping used by back_to_welcome's output list.
    pages = {
        "welcome": welcome_page, "info": info_page, "sample": sample_page,
        "pretest": pretest_page, "test": test_page, "final_judgment": final_judgment_page,
        "result": result_page
    }

    with welcome_page:
        gr.Markdown("# AI 识破者\n你将听到一系列对话,请判断哪个回应者是 AI。")
        start_btn = gr.Button("开始挑战", variant="primary")

    with info_page:
        gr.Markdown("## 请提供一些基本信息")
        age_input = gr.Radio(["18岁以下", "18-25岁", "26-35岁", "36-50岁", "50岁以上"], label="年龄")
        # NOTE(review): the first two gender options are empty strings — this
        # looks like characters lost in extraction (likely "男"/"女"); confirm
        # against the original source before shipping.
        gender_input = gr.Radio(["", "", "其他"], label="性别")
        education_input = gr.Radio(["高中及以下", "本科", "硕士", "博士", "其他(请注明)"], label="学历")
        # Free-text field revealed only when "其他(请注明)" is selected (see
        # toggle_education_other wiring below).
        education_other_input = gr.Textbox(label="请填写你的学历", visible=False, interactive=False)
        # Disabled until check_info_complete validates all inputs.
        submit_info_btn = gr.Button("提交并开始学习样例", variant="primary", interactive=False)

    with sample_page:
        gr.Markdown("## 样例分析\n请选择一个维度进行学习。所有维度共用同一个样例音频。")
        sample_dimension_selector = gr.Radio(DIMENSION_TITLES, label="选择学习维度", value=DIMENSION_TITLES[0])
        with gr.Row():
            with gr.Column(scale=1):
                # All dimensions share the same sample audio/description.
                sample_audio = gr.Audio(label="样例音频", value=DIMENSIONS_DATA[0]["audio"])
                sample_desc = gr.Textbox(label="文本描述", interactive=False, value=DIMENSIONS_DATA[0]["desc"])
            with gr.Column(scale=2):
                # Two mutually exclusive views toggled by reference_btn.
                with gr.Column(visible=True) as interactive_view:
                    interactive_checkbox_group = gr.CheckboxGroup(label="维度特征", choices=DIMENSIONS_DATA[0]["sub_dims"], interactive=True)
                with gr.Column(visible=False) as reference_view:
                    gr.Markdown("### 参考答案解析")
                    reference_text = gr.Markdown(value=DIMENSIONS_DATA[0]["reference"])
        reference_btn = gr.Button("参考")
        go_to_pretest_btn = gr.Button("我明白了,开始测试", variant="primary")

    with pretest_page:
        # Test instructions shown once before the real test begins.
        gr.Markdown("## 测试说明\n"
                    "- 对于每一道题,你都需要对全部 **5 个维度** 进行评估。\n"
                    "- 在每个维度下,请为出现的每个特征 **从0到5打分**。\n"
                    "- 完成5个维度的打分后,你将需要做出“人类”或“机器人”的 **最终判断**。\n"
                    "- 你可以使用“上一维度”和“下一维度”按钮在5个维度间自由切换和修改分数。")
        go_to_test_btn = gr.Button("开始测试", variant="primary")

    with test_page:
        gr.Markdown("## 正式测试")
        question_progress_text = gr.Markdown()
        test_dimension_title = gr.Markdown()
        test_audio = gr.Audio(label="测试音频")
        test_desc = gr.Textbox(label="文本描述", interactive=False)
        gr.Markdown("--- \n ### 请为以下特征打分 (0-5分)")

        # A fixed pool of sliders; per-dimension code shows/relabels only the
        # ones that dimension needs (the rest stay hidden).
        test_sliders = [gr.Slider(minimum=0, maximum=5, step=1, label=f"Sub-dim {i+1}", visible=False, interactive=True) for i in range(MAX_SUB_DIMS)]

        with gr.Row():
            prev_dim_btn = gr.Button("上一维度")
            next_dim_btn = gr.Button("下一维度", variant="primary")

    with final_judgment_page:
        gr.Markdown("## 最终判断")
        gr.Markdown("您已完成对所有维度的评分。请根据您的综合印象,做出最终判断。")
        # Enabled only after all dimensions have been scored.
        final_human_robot_radio = gr.Radio(["👤 人类", "🤖 机器人"], label="请判断回应者类型 (必填)", interactive=False)
        submit_final_answer_btn = gr.Button("提交本题答案", variant="primary", interactive=False)

    with result_page:
        gr.Markdown("## 测试完成")
        result_text = gr.Markdown()
        back_to_welcome_btn = gr.Button("返回主界面", variant="primary")

    # ==========================================================================
    # Event wiring. The shared output lists below must stay in the exact order
    # that the corresponding callbacks return their values.
    # ==========================================================================
    # Outputs updated when a new question is initialised.
    test_init_outputs = [
        pretest_page, test_page, final_judgment_page, result_page,
        current_question_index, current_test_dimension_index, current_question_selections,
        question_progress_text, test_dimension_title, test_audio, test_desc,
        prev_dim_btn, next_dim_btn,
        final_human_robot_radio, submit_final_answer_btn,
    ] + test_sliders

    # Inputs/outputs for moving between the 5 dimensions of one question.
    nav_inputs = [current_question_index, current_test_dimension_index, current_question_selections] + test_sliders
    nav_outputs = [
        test_page, final_judgment_page,
        current_question_index, current_test_dimension_index, current_question_selections,
        question_progress_text, test_dimension_title, test_audio, test_desc,
        final_human_robot_radio, submit_final_answer_btn,
        prev_dim_btn, next_dim_btn,
    ] + test_sliders

    # Question-init outputs plus the accumulated results and final summary text.
    full_outputs_with_results = test_init_outputs + [test_results, result_text]

    start_btn.click(fn=start_challenge, outputs=[welcome_page, info_page])

    # Re-validate the info form whenever any field changes, enabling/disabling
    # the submit button.
    for comp in [age_input, gender_input, education_input, education_other_input]:
        comp.change(fn=check_info_complete, inputs=[age_input, gender_input, education_input, education_other_input], outputs=submit_info_btn)

    # Show the free-text education field only for "其他(请注明)".
    education_input.change(fn=toggle_education_other, inputs=education_input, outputs=education_other_input)

    submit_info_btn.click(fn=show_sample_page_and_init, inputs=[age_input, gender_input, education_input, education_other_input, user_data_state], outputs=[info_page, sample_page, user_data_state, sample_dimension_selector])

    sample_dimension_selector.change(fn=update_sample_view, inputs=sample_dimension_selector, outputs=[sample_audio, sample_desc, interactive_checkbox_group, reference_text])

    # The button's own label doubles as the toggle state ("参考" <-> "返回").
    reference_btn.click(fn=toggle_reference_view, inputs=reference_btn, outputs=[interactive_view, reference_view, reference_btn])

    go_to_pretest_btn.click(lambda: (gr.update(visible=False), gr.update(visible=True)), outputs=[sample_page, pretest_page])

    # Start the first question; the trailing ([], gr.update()) fills the
    # test_results and result_text slots of full_outputs_with_results.
    go_to_test_btn.click(
        fn=lambda user: init_test_question(user, 0) + ([], gr.update()),
        inputs=[user_data_state],
        outputs=full_outputs_with_results
    )

    prev_dim_btn.click(
        fn=lambda q,d,s, *sliders: navigate_dimensions("prev", q,d,s, *sliders),
        inputs=nav_inputs, outputs=nav_outputs
    )

    next_dim_btn.click(
        fn=lambda q,d,s, *sliders: navigate_dimensions("next", q,d,s, *sliders),
        inputs=nav_inputs, outputs=nav_outputs
    )

    # Record the final human/robot judgment and advance (or finish and save).
    submit_final_answer_btn.click(
        fn=submit_question_and_advance,
        inputs=[current_question_index, current_test_dimension_index, current_question_selections, final_human_robot_radio, test_results, user_data_state],
        outputs=full_outputs_with_results
    )

    back_to_welcome_btn.click(fn=back_to_welcome, outputs=list(pages.values()) + [user_data_state, current_question_index, current_test_dimension_index, current_question_selections, test_results])
458
+
459
+ # ==============================================================================
460
+ # 程序入口 (Entry Point)
461
+ # ==============================================================================
462
if __name__ == "__main__":
    # Ensure the audio directory exists. exist_ok=True replaces the previous
    # exists()/makedirs() pair, which was race-prone (TOCTOU) and more verbose.
    os.makedirs("audio", exist_ok=True)

    # When running inside a deployed Hugging Face Space, warn up front about
    # any referenced audio file missing from the repo instead of failing
    # silently at playback time.
    if "SPACE_ID" in os.environ:
        print("Running in a Hugging Face Space, checking for audio files...")
        # Collect every audio path referenced by questions and sample dimensions.
        all_files = [q["audio"] for q in QUESTION_SET] + [d["audio"] for d in DIMENSIONS_DATA]
        for audio_file in set(all_files):
            if not os.path.exists(audio_file):
                print(f"⚠️ Warning: Audio file not found: {audio_file}")

    demo.launch(debug=True)