xiaohy commited on
Commit
907530f
·
verified ·
1 Parent(s): 51e503d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +426 -464
app.py CHANGED
@@ -35,524 +35,486 @@ config = load_json("config.json")
35
  plt.rcParams['font.sans-serif'] = ['DejaVu Sans']
36
  plt.rcParams['axes.unicode_minus'] = False
37
 
38
- bl_auc = mia_results.get('baseline', {}).get('auc', 0)
39
- s002_auc = mia_results.get('smooth_0.02', {}).get('auc', 0)
40
- s02_auc = mia_results.get('smooth_0.2', {}).get('auc', 0)
41
- op001_auc = perturb_results.get('perturbation_0.01', {}).get('auc', 0)
42
- op0015_auc = perturb_results.get('perturbation_0.015', {}).get('auc', 0)
43
- op002_auc = perturb_results.get('perturbation_0.02', {}).get('auc', 0)
 
 
 
 
 
 
 
 
 
44
  bl_acc = utility_results.get('baseline', {}).get('accuracy', 0) * 100
45
  s002_acc = utility_results.get('smooth_0.02', {}).get('accuracy', 0) * 100
46
  s02_acc = utility_results.get('smooth_0.2', {}).get('accuracy', 0) * 100
47
- bl_m_mean = mia_results.get('baseline', {}).get('member_loss_mean', 0.19)
48
- bl_nm_mean = mia_results.get('baseline', {}).get('non_member_loss_mean', 0.23)
49
- bl_m_std = mia_results.get('baseline', {}).get('member_loss_std', 0.03)
50
- bl_nm_std = mia_results.get('baseline', {}).get('non_member_loss_std', 0.03)
51
- s002_m_mean = mia_results.get('smooth_0.02', {}).get('member_loss_mean', 0.20)
52
- s002_nm_mean = mia_results.get('smooth_0.02', {}).get('non_member_loss_mean', 0.22)
53
- s002_m_std = mia_results.get('smooth_0.02', {}).get('member_loss_std', 0.03)
54
- s002_nm_std = mia_results.get('smooth_0.02', {}).get('non_member_loss_std', 0.03)
55
- s02_m_mean = mia_results.get('smooth_0.2', {}).get('member_loss_mean', 0.21)
56
- s02_nm_mean = mia_results.get('smooth_0.2', {}).get('non_member_loss_mean', 0.22)
57
- s02_m_std = mia_results.get('smooth_0.2', {}).get('member_loss_std', 0.03)
58
- s02_nm_std = mia_results.get('smooth_0.2', {}).get('non_member_loss_std', 0.03)
59
- model_name_str = config.get('model_name', 'Qwen/Qwen2.5-Math-1.5B-Instruct')
60
- data_size_str = str(config.get('data_size', 2000))
61
-
62
- MODEL_PARAMS = {
63
- "baseline": {"m_mean": bl_m_mean, "nm_mean": bl_nm_mean, "m_std": bl_m_std, "nm_std": bl_nm_std, "key": "baseline", "label": "Baseline"},
64
- "smooth_0.02": {"m_mean": s002_m_mean, "nm_mean": s002_nm_mean, "m_std": s002_m_std, "nm_std": s002_nm_std, "key": "smooth_0.02", "label": "LS(e=0.02)"},
65
- "smooth_0.2": {"m_mean": s02_m_mean, "nm_mean": s02_nm_mean, "m_std": s02_m_std, "nm_std": s02_nm_std, "key": "smooth_0.2", "label": "LS(e=0.2)"},
 
66
  }
67
 
68
- # 预生成效用测试题库(中文300道题)
69
- EVAL_QUESTIONS = []
70
- eval_types = ['calculation'] * 120 + ['word_problem'] * 90 + ['concept'] * 60 + ['error_correction'] * 30
71
- np.random.seed(777)
72
  TYPE_CN = {'calculation': '基础计算', 'word_problem': '应用题', 'concept': '概念问答', 'error_correction': '错题订正'}
73
- for i in range(300):
74
- t = eval_types[i]
75
- if t == 'calculation':
76
- a, b = int(np.random.randint(10, 500)), int(np.random.randint(10, 500))
77
- ops = ['+', '-', 'x']
78
- op = ops[i % 3]
79
- if op == '+':
80
- q = f"请计算: {a} + {b} = ?"
81
- ans = str(a + b)
82
- elif op == '-':
83
- q = f"请计算: {a} - {b} = ?"
84
- ans = str(a - b)
85
- else:
86
- q = f"请计算: {a} x {b} = ?"
87
- ans = str(a * b)
88
- elif t == 'word_problem':
89
- a, b = int(np.random.randint(5, 200)), int(np.random.randint(3, 50))
90
- c = int(np.random.randint(5, 50))
91
- templates = [
92
- (f"小明有{a}个苹果,吃掉了{b}个,还剩多少个?", str(a - b)),
93
- (f"每组有{a}人,共{b}组,一共多少人?", str(a * b)),
94
- (f"图书馆有{a}��书,借出{b}本后又买了{c}本,现在有多少本?", str(a - b + c)),
95
- (f"商店有{a}支铅笔,卖出{b}支,还剩多少支?", str(a - b)),
96
- (f"小红有{a}颗糖,小明给了她{b}颗,现在有多少颗?", str(a + b)),
97
- ]
98
- q, ans = templates[i % len(templates)]
99
- elif t == 'concept':
100
- concepts = [
101
- ("面积", "面积是指一个平面图形所占平面的大小,常用单位有平方厘米、平方米等。"),
102
- ("周长", "周长是指围绕一个封闭图形边线一周的总长度。"),
103
- ("分数", "分数表示一个整体被等分后取其中若干份,由分子和分母组成。"),
104
- ("小数", "小数是用小数点来表示比1小的数或整数与真分数之和的数。"),
105
- ("平均数", "平均数是一组数据的总和除以数据的个数所得到的商。"),
106
- ]
107
- concept, definition = concepts[i % len(concepts)]
108
- q = f"请解释什么是{concept}?"
109
- ans = definition
110
  else:
111
- a, b = int(np.random.randint(10, 99)), int(np.random.randint(10, 99))
112
- correct = a + b
113
- wrong = correct + int(np.random.choice([-1, 1, -10, 10]))
114
- q = f"有同学算 {a} + {b} = {wrong},请问正确答案是多少?"
115
- ans = str(correct)
116
- bl_correct = bool(np.random.random() < (bl_acc / 100))
117
- s002_correct = bool(np.random.random() < (s002_acc / 100))
118
- s02_correct = bool(np.random.random() < (s02_acc / 100))
119
- EVAL_QUESTIONS.append({
120
- 'question': q, 'answer': ans, 'type': t, 'type_cn': TYPE_CN[t],
121
- 'baseline': bl_correct, 'smooth_0.02': s002_correct, 'smooth_0.2': s02_correct
122
- })
123
  # ========================================
124
- # Charts
125
  # ========================================
126
 
127
- def make_loss_distribution():
128
- items = []
129
- for k, t in [('baseline', 'Baseline'), ('smooth_0.02', 'LS(e=0.02)'), ('smooth_0.2', 'LS(e=0.2)')]:
130
- if k in full_results:
131
- auc = mia_results.get(k, {}).get('auc', 0)
132
- items.append((k, t + "\nAUC=" + f"{auc:.4f}"))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
133
  n = len(items)
134
- if n == 0:
135
- fig, ax = plt.subplots(); ax.text(0.5, 0.5, 'No data', ha='center'); return fig
136
- fig, axes = plt.subplots(1, n, figsize=(6.5 * n, 5.5))
137
  if n == 1: axes = [axes]
138
- for ax, (k, title) in zip(axes, items):
139
- m = full_results[k]['member_losses']; nm_l = full_results[k]['non_member_losses']
140
- bins = np.linspace(min(min(m), min(nm_l)), max(max(m), max(nm_l)), 30)
141
- ax.hist(m, bins=bins, alpha=0.55, color='#5B8FF9', label='Member', density=True)
142
- ax.hist(nm_l, bins=bins, alpha=0.55, color='#E86452', label='Non-Member', density=True)
143
- ax.set_title(title, fontsize=13, fontweight='bold')
144
- ax.set_xlabel('Loss', fontsize=11); ax.set_ylabel('Density', fontsize=11)
145
- ax.legend(fontsize=10); ax.tick_params(labelsize=10)
146
- ax.grid(True, linestyle='--', alpha=0.3)
147
  ax.spines['top'].set_visible(False); ax.spines['right'].set_visible(False)
148
- plt.tight_layout()
149
- return fig
150
-
151
-
152
- def make_perturb_loss_distribution():
153
- bl = full_results.get('baseline', {})
154
- if not bl:
155
- fig, ax = plt.subplots(); ax.text(0.5, 0.5, 'No data', ha='center'); return fig
156
- m_losses = np.array(bl['member_losses']); nm_losses = np.array(bl['non_member_losses'])
157
- fig, axes = plt.subplots(1, 3, figsize=(19.5, 5.5))
158
- for ax, sigma in zip(axes, [0.01, 0.015, 0.02]):
159
- np.random.seed(42)
160
- m_pert = m_losses + np.random.normal(0, sigma, len(m_losses))
161
- np.random.seed(43)
162
- nm_pert = nm_losses + np.random.normal(0, sigma, len(nm_losses))
163
- vals = np.concatenate([m_pert, nm_pert])
164
- bins = np.linspace(vals.min(), vals.max(), 30)
165
- ax.hist(m_pert, bins=bins, alpha=0.55, color='#5B8FF9', label='Member+noise', density=True)
166
- ax.hist(nm_pert, bins=bins, alpha=0.55, color='#E86452', label='Non-Member+noise', density=True)
167
- pk = 'perturbation_' + str(sigma)
168
- pauc = perturb_results.get(pk, {}).get('auc', 0)
169
- ax.set_title(f'OP(s={sigma})\nAUC={pauc:.4f}', fontsize=13, fontweight='bold')
170
- ax.set_xlabel('Loss', fontsize=11); ax.set_ylabel('Density', fontsize=11)
171
- ax.legend(fontsize=9); ax.tick_params(labelsize=10)
172
- ax.grid(True, linestyle='--', alpha=0.3)
173
  ax.spines['top'].set_visible(False); ax.spines['right'].set_visible(False)
174
- plt.tight_layout()
175
- return fig
176
-
177
-
178
- def make_auc_bar():
179
- methods, aucs, colors = [], [], []
180
- for k, n, c in [('baseline', 'Baseline', '#8C8C8C'), ('smooth_0.02', 'LS(e=0.02)', '#5B8FF9'),
181
- ('smooth_0.2', 'LS(e=0.2)', '#3D76DD')]:
182
- if k in mia_results: methods.append(n); aucs.append(mia_results[k]['auc']); colors.append(c)
183
- for k, n, c in [('perturbation_0.01', 'OP(s=0.01)', '#5AD8A6'), ('perturbation_0.015', 'OP(s=0.015)', '#2EAD78'),
184
- ('perturbation_0.02', 'OP(s=0.02)', '#1A7F5A')]:
185
- if k in perturb_results: methods.append(n); aucs.append(perturb_results[k]['auc']); colors.append(c)
186
- fig, ax = plt.subplots(figsize=(12, 6))
187
- bars = ax.bar(methods, aucs, color=colors, width=0.5, edgecolor='white', linewidth=1.5)
188
- for bar, a in zip(bars, aucs):
189
- ax.text(bar.get_x()+bar.get_width()/2, bar.get_height()+0.002, f'{a:.4f}', ha='center', va='bottom', fontsize=11, fontweight='bold')
190
- ax.axhline(y=0.5, color='#E86452', linestyle='--', linewidth=1.5, alpha=0.6, label='Random Guess (0.5)')
191
- ax.set_ylabel('MIA AUC', fontsize=12); ax.set_ylim(0.48, max(aucs)+0.035)
192
- ax.legend(fontsize=10); ax.grid(axis='y', linestyle='--', alpha=0.3)
 
 
 
 
 
 
 
 
 
 
 
 
 
193
  ax.spines['top'].set_visible(False); ax.spines['right'].set_visible(False)
194
- plt.xticks(fontsize=11); plt.tight_layout()
195
- return fig
196
 
197
 
198
- def make_tradeoff():
199
- fig, ax = plt.subplots(figsize=(10, 7))
200
  pts = []
201
- for k, n, mk, c, sz in [('baseline','Baseline','o','#8C8C8C',220), ('smooth_0.02','LS(e=0.02)','s','#5B8FF9',200), ('smooth_0.2','LS(e=0.2)','s','#3D76DD',200)]:
202
- if k in mia_results and k in utility_results:
203
- pts.append({'n':n,'a':mia_results[k]['auc'],'c':utility_results[k]['accuracy'],'m':mk,'co':c,'s':sz})
204
- ba = utility_results.get('baseline',{}).get('accuracy',0.633)
205
- for k, n, mk, c, sz in [('perturbation_0.01','OP(s=0.01)','^','#5AD8A6',200), ('perturbation_0.015','OP(s=0.015)','D','#2EAD78',160), ('perturbation_0.02','OP(s=0.02)','^','#1A7F5A',200)]:
206
- if k in perturb_results: pts.append({'n':n,'a':perturb_results[k]['auc'],'c':ba,'m':mk,'co':c,'s':sz})
207
- for p in pts:
208
- ax.scatter(p['c'], p['a'], label=p['n'], marker=p['m'], color=p['co'], s=p['s'], edgecolors='white', linewidth=2, zorder=5)
209
- ax.axhline(y=0.5, color='#BFBFBF', linestyle='--', alpha=0.8, label='Random Guess')
210
- ax.set_xlabel('Accuracy', fontsize=12, fontweight='bold'); ax.set_ylabel('MIA AUC', fontsize=12, fontweight='bold')
211
- ax.set_title('Privacy-Utility Trade-off', fontsize=14, fontweight='bold')
212
- aa=[p['c'] for p in pts]; ab=[p['a'] for p in pts]
213
- if aa and ab: ax.set_xlim(min(aa)-0.03,max(aa)+0.05); ax.set_ylim(min(min(ab),0.5)-0.02,max(ab)+0.025)
214
- ax.legend(loc='upper right', fontsize=9); ax.grid(True, alpha=0.2)
215
  ax.spines['top'].set_visible(False); ax.spines['right'].set_visible(False)
216
  plt.tight_layout(); return fig
217
 
218
 
219
- def make_accuracy_bar():
220
- names, accs, colors = [], [], []
221
- for k, n, c in [('baseline','Baseline','#8C8C8C'), ('smooth_0.02','LS(e=0.02)','#5B8FF9'), ('smooth_0.2','LS(e=0.2)','#3D76DD')]:
222
- if k in utility_results: names.append(n); accs.append(utility_results[k]['accuracy']*100); colors.append(c)
223
- bp = utility_results.get('baseline',{}).get('accuracy',0)*100
224
- for k, n, c in [('perturbation_0.01','OP(s=0.01)','#5AD8A6'), ('perturbation_0.015','OP(s=0.015)','#2EAD78'), ('perturbation_0.02','OP(s=0.02)','#1A7F5A')]:
225
- if k in perturb_results: names.append(n); accs.append(bp); colors.append(c)
226
- fig, ax = plt.subplots(figsize=(12, 6))
227
- bars = ax.bar(names, accs, color=colors, width=0.5, edgecolor='white', linewidth=1.5)
228
- for bar, acc in zip(bars, accs):
229
- ax.text(bar.get_x()+bar.get_width()/2, bar.get_height()+0.5, f'{acc:.1f}%', ha='center', va='bottom', fontsize=11, fontweight='bold')
230
- ax.set_ylabel('Accuracy (%)', fontsize=12); ax.set_ylim(0, 100)
231
- ax.grid(axis='y', alpha=0.3); ax.spines['top'].set_visible(False); ax.spines['right'].set_visible(False)
232
- plt.xticks(fontsize=11); plt.tight_layout(); return fig
233
-
234
-
235
- def make_loss_gauge(loss_val, m_mean, nm_mean, threshold, m_std, nm_std):
236
- fig, ax = plt.subplots(figsize=(9, 3))
237
- x_min = min(m_mean-3*m_std, loss_val-0.01); x_max = max(nm_mean+3*nm_std, loss_val+0.01)
238
- ax.axvspan(x_min, threshold, alpha=0.12, color='#5B8FF9')
239
- ax.axvspan(threshold, x_max, alpha=0.12, color='#E86452')
240
- ax.axvline(x=threshold, color='#434343', linewidth=2, zorder=3)
241
- ax.text(threshold, 1.12, 'Threshold', ha='center', va='bottom', fontsize=10, fontweight='bold', color='#434343', transform=ax.get_xaxis_transform())
242
- ax.axvline(x=m_mean, color='#5B8FF9', linewidth=1.2, linestyle='--', alpha=0.6)
243
- ax.text(m_mean, -0.3, f'Member\n({m_mean:.4f})', ha='center', va='top', fontsize=8, color='#5B8FF9', transform=ax.get_xaxis_transform())
244
- ax.axvline(x=nm_mean, color='#E86452', linewidth=1.2, linestyle='--', alpha=0.6)
245
- ax.text(nm_mean, -0.3, f'Non-Mem\n({nm_mean:.4f})', ha='center', va='top', fontsize=8, color='#E86452', transform=ax.get_xaxis_transform())
246
- mc = '#5B8FF9' if loss_val < threshold else '#E86452'
247
- ax.plot(loss_val, 0.5, marker='v', markersize=16, color=mc, zorder=5, transform=ax.get_xaxis_transform())
248
- ax.text(loss_val, 0.78, f'Loss={loss_val:.4f}', ha='center', va='bottom', fontsize=11, fontweight='bold', color=mc, transform=ax.get_xaxis_transform(),
249
- bbox=dict(boxstyle='round,pad=0.3', facecolor='white', edgecolor=mc, alpha=0.95))
250
- ax.text((x_min+threshold)/2, 0.5, 'Member Zone', ha='center', va='center', fontsize=11, color='#5B8FF9', fontweight='bold', alpha=0.5, transform=ax.get_xaxis_transform())
251
- ax.text((threshold+x_max)/2, 0.5, 'Non-Member Zone', ha='center', va='center', fontsize=11, color='#E86452', fontweight='bold', alpha=0.5, transform=ax.get_xaxis_transform())
252
- ax.set_xlim(x_min, x_max); ax.set_yticks([])
253
- for sp in ['top','right','left']: ax.spines[sp].set_visible(False)
254
- ax.set_xlabel('Loss Value', fontsize=10); plt.tight_layout(); return fig
255
-
256
-
257
  # ========================================
258
- # Callbacks
259
  # ========================================
260
 
261
- def show_random_sample(data_type):
262
- data = member_data if data_type == "成员数据(训练集)" else non_member_data
263
- sample = data[np.random.randint(0, len(data))]
264
- meta = sample['metadata']
265
- task_map = {'calculation':'基础计算','word_problem':'应用题','concept':'概念问答','error_correction':'错题订正'}
266
- info_md = ("**截获的隐私元数据**\n\n"
267
- "- **姓名**: " + clean_text(str(meta.get('name',''))) + "\n"
268
- "- **学号**: " + clean_text(str(meta.get('student_id',''))) + "\n"
269
- "- **班级**: " + clean_text(str(meta.get('class',''))) + "\n"
270
- "- **成绩**: " + clean_text(str(meta.get('score',''))) + " 分\n"
271
- "- **类型**: " + task_map.get(sample.get('task_type',''),'') + "\n")
272
- return info_md, clean_text(sample.get('question','')), clean_text(sample.get('answer',''))
273
-
274
-
275
- MODEL_CHOICE_MAP = {
276
- "基线模型 (Baseline)": "baseline",
277
- "标签平滑模型 (e=0.02)": "smooth_0.02",
278
- "标签平滑模型 (e=0.2)": "smooth_0.2",
279
- "输出扰动 (s=0.01)": "perturbation_0.01",
280
- "输出扰动 (s=0.015)": "perturbation_0.015",
281
- "输出扰动 (s=0.02)": "perturbation_0.02",
282
- }
283
-
284
-
285
- def run_mia_demo(sample_index, data_type, model_choice):
286
- is_member = (data_type == "成员数据(训练集)")
287
- data = member_data if is_member else non_member_data
288
- idx = min(int(sample_index), len(data)-1)
289
- sample = data[idx]
290
- model_key = MODEL_CHOICE_MAP.get(model_choice, "baseline")
291
- is_perturb = model_key.startswith("perturbation_")
292
-
293
- if is_perturb:
294
- sigma = float(model_key.split("_")[1])
295
- base_fr = full_results.get('baseline', {})
296
- losses_key = 'member_losses' if is_member else 'non_member_losses'
297
- if idx < len(base_fr.get(losses_key, [])):
298
- base_loss = base_fr[losses_key][idx]
299
- else:
300
- base_loss = float(np.random.normal(bl_m_mean if is_member else bl_nm_mean, 0.02))
301
  np.random.seed(idx * 1000 + int(sigma * 1000))
302
  loss = base_loss + np.random.normal(0, sigma)
303
- m_mean, nm_mean, m_std_v, nm_std_v = bl_m_mean, bl_nm_mean, bl_m_std, bl_nm_std
304
- model_auc = perturb_results.get(model_key, {}).get('auc', 0)
305
- display_label = "OP(s=" + str(sigma) + ")"
306
  else:
307
- params = MODEL_PARAMS.get(model_key, MODEL_PARAMS["baseline"])
308
- fr = full_results.get(model_key, full_results.get('baseline', {}))
309
- losses_key = 'member_losses' if is_member else 'non_member_losses'
310
- if idx < len(fr.get(losses_key, [])):
311
- loss = fr[losses_key][idx]
312
- else:
313
- loss = float(np.random.normal(params['m_mean'] if is_member else params['nm_mean'], 0.02))
314
- m_mean, nm_mean = params['m_mean'], params['nm_mean']
315
- m_std_v, nm_std_v = params['m_std'], params['nm_std']
316
- model_auc = mia_results.get(model_key, {}).get('auc', 0)
317
- display_label = params['label']
318
-
319
- threshold = (m_mean + nm_mean) / 2.0
320
- pred_member = (loss < threshold)
321
- attack_correct = (pred_member == is_member)
322
- gauge_fig = make_loss_gauge(loss, m_mean, nm_mean, threshold, m_std_v, nm_std_v)
323
-
324
- pl = "训练员" if pred_member else "非训练成员"
325
- pc = "🔴" if pred_member else "🟢"
326
- al = "训练成员" if is_member else "非训练成员"
327
- ac = "🔴" if is_member else "🟢"
328
-
329
- if attack_correct and pred_member and is_member:
330
- v = "⚠️ **攻击成功: 发生了隐私泄露**"; vd = "模型对该样本过于熟悉(Loss低于阈值),攻击者成功判定其为训练集数据。"
331
- elif attack_correct:
332
- v = "✅ **判断正确**"; vd = "攻击者的判定与真实身份一致。"
333
  else:
334
- v = " **攻击失误**"; vd = "攻击者的判定与真实身份不符。"
335
-
336
- result_md = (v + "\n\n" + vd + "\n\n"
337
- "**当前攻击模型**: " + display_label + " (AUC=" + f"{model_auc:.4f}" + ")\n\n"
338
- "| | 攻击者计算得出 | 系统真实身份 |\n|---|---|---|\n"
339
- "| 判定 | " + pc + " " + pl + " | " + ac + " " + al + " |\n"
340
- "| Loss | " + f"{loss:.4f}" + " | Threshold: " + f"{threshold:.4f}" + " |\n")
341
- q_text = "**样本追踪号 [" + str(idx) + "] :**\n\n" + clean_text(sample.get('question',''))[:500]
342
- return q_text, gauge_fig, result_md
343
-
344
-
345
- EVAL_MODEL_MAP = {
346
- "基线模型 (Baseline)": "baseline",
347
- "标签平滑模型 (e=0.02)": "smooth_0.02",
348
- "标签平滑模型 (e=0.2)": "smooth_0.2",
349
- "输出扰动 (s=0.01)": "baseline",
350
- "输出扰动 (s=0.015)": "baseline",
351
- "输出扰动 (s=0.02)": "baseline",
352
- }
353
 
354
- EVAL_ACC_MAP = {
355
- "基线模型 (Baseline)": bl_acc,
356
- "标签平滑模型 (e=0.02)": s002_acc,
357
- "标签平滑模型 (e=0.2)": s02_acc,
358
- "输出扰动 (s=0.01)": bl_acc,
359
- "输出扰动 (s=0.015)": bl_acc,
360
- "输出扰动 (s=0.02)": bl_acc,
361
- }
362
 
363
 
364
- def run_eval_demo(eval_model):
365
- model_key = EVAL_MODEL_MAP.get(eval_model, "baseline")
366
- overall_acc = EVAL_ACC_MAP.get(eval_model, bl_acc)
367
- idx = np.random.randint(0, len(EVAL_QUESTIONS))
368
- q = EVAL_QUESTIONS[idx]
369
- is_correct = q.get(model_key, q.get('baseline', False))
370
- icon = "✅" if is_correct else "❌"
371
- result_md = (
372
- "### 测试结果\n\n"
373
- "**模型**: " + eval_model + " (总体准确率: " + f"{overall_acc:.1f}" + "%)\n\n"
374
- "| 项目 | 内容 |\n|---|---|\n"
375
- "| 题目编号 | #" + str(idx+1) + " / 300 |\n"
376
- "| 题目类| " + q.get('type_cn', q['type']) + " |\n"
377
- "| 题目 | " + q['question'] + " |\n"
378
- "| 正确答案 | " + q['answer'] + " |\n"
379
- "| 判定 | " + icon + " " + ("正确" if is_correct else "错误") + " |\n\n")
380
- if eval_model.startswith("输出扰动"):
381
- result_md += "> 输出扰动不改变模型参数,因此准确率与基线完全一致。\n"
382
- return result_md
383
 
384
 
385
  # ========================================
386
- # Interface
387
  # ========================================
388
 
389
  CSS = """
390
- body { background-color: #f0f4f8 !important; }
391
- .gradio-container { max-width: 1200px !important; margin: auto !important; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "PingFang SC", "Microsoft YaHei", sans-serif !important; }
392
- .tab-nav { border-bottom: 2px solid #e1e8f0 !important; margin-bottom: 20px !important; }
393
- .tab-nav button { font-size: 15px !important; padding: 14px 22px !important; font-weight: 500 !important; color: #64748b !important; border-radius: 8px 8px 0 0 !important; background: transparent !important; border: none !important; }
394
- .tab-nav button.selected { font-weight: 700 !important; color: #2563eb !important; border-bottom: 3px solid #2563eb !important; }
395
- .tabitem { background: #fff !important; border-radius: 12px !important; box-shadow: 0 4px 20px rgba(0,0,0,0.04) !important; padding: 30px !important; border: 1px solid #e2e8f0 !important; }
396
- .prose h1 { font-size: 2rem !important; color: #0f172a !important; font-weight: 800 !important; text-align: center !important; }
397
- .prose h2 { font-size: 1.35rem !important; color: #1e293b !important; margin-top: 1.5em !important; padding-bottom: 0.4em !important; border-bottom: 2px solid #f1f5f9 !important; font-weight: 700 !important; }
398
- .prose h3 { font-size: 1.1rem !important; color: #334155 !important; font-weight: 600 !important; }
399
- .prose table { width: 100% !important; border-collapse: separate !important; border-spacing: 0 !important; margin: 1.2em 0 !important; border-radius: 10px !important; overflow: hidden !important; box-shadow: 0 0 0 1px #e2e8f0, 0 4px 6px -1px rgba(0,0,0,0.05) !important; font-size: 0.9rem !important; }
400
- .prose th { background: #f8fafc !important; color: #475569 !important; font-weight: 600 !important; padding: 10px 14px !important; border-bottom: 2px solid #e2e8f0 !important; }
401
- .prose tr:nth-child(even) td { background: #f8fafc !important; }
402
- .prose td { padding: 9px 14px !important; color: #334155 !important; border-bottom: 1px solid #e2e8f0 !important; }
403
- .prose blockquote { border-left: 4px solid #3b82f6 !important; background: linear-gradient(to right,#eff6ff,#fff) !important; padding: 14px 18px !important; border-radius: 0 8px 8px 0 !important; color: #1e40af !important; }
404
- button.primary { background: linear-gradient(135deg,#3b82f6 0%,#2563eb 100%) !important; border: none !important; box-shadow: 0 4px 12px rgba(37,99,235,0.25) !important; font-weight: 600 !important; }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
405
  footer { display: none !important; }
406
  """
407
 
408
- with gr.Blocks(title="教育大模型隐私攻防", theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky", neutral_hue="slate"), css=CSS) as demo:
409
 
410
- gr.Markdown("# 教育大模型中的成员推理攻击及其防御研究\n\n> 探究教育场景下大语言模型的隐私泄露风险,验证标签平滑与输出扰动两种防御策略的有效性。\n")
 
411
 
412
- with gr.Tab("项目概览"):
 
413
  gr.Markdown(
414
- "## 研究背景\n\n大语言模型在教育领域广泛应用,训练过程不可避免接触学生敏感数据。**成员推理攻击 (MIA)** 能判断数据是否参与训练,构成隐私威胁。\n\n---\n\n"
415
- "## 实验设计\n\n"
 
 
 
 
 
 
 
416
  "| 阶段 | 内容 | 方法 |\n|------|------|------|\n"
417
- "| 1. 数据准备 | 2000条小学数学辅导对话 | 模板化生成,含隐私字段 |\n"
418
- "| 2. 基线模型训练 | Qwen2.5-Math-1.5B + LoRA | 标准微调,无防御 |\n"
419
- "| 3. 标签平滑模型训练 | 两组平滑系数 | e=0.02 e=0.2 分别训练 |\n"
420
- "| 4. MIA攻击测试 | 全部模型及策略 | 三模型Loss攻击 + 组输出扰动 |\n"
421
- "| 5. 效用评估 | 300道数学测试题 | 三模型 + 三组扰动分别测试 |\n"
422
- "| 6. 综合分析 | 隐私-效用权衡 | 散点图 + 定量对比 |\n\n---\n\n"
423
- "## 实验配置\n\n| 项目 | 值 |\n|------|-----|\n"
424
- "| 基座模型 | " + model_name_str + " |\n"
425
- "| 微调 | LoRA (r=8, alpha=16) |\n| 训练轮数 | 10 epochs |\n"
426
- "| 数据量 | " + data_size_str + " 条 |\n| 模型数 | 3个 |\n")
427
-
428
- with gr.Tab("数据展示"):
429
- gr.Markdown("## 数据集概况\n\n"
430
- "- **成员数据** (1000条): 用于模型训练,模型会\"记住\"这些数据\n"
431
- "- **非成员数据** (1000条): 不参训练,作为攻击对照组\n"
432
- "- 两组数据**格式完全相同**(都含隐私字段),这是MIA实验的标准设置——攻击者无法从数据格式区分成员与非成员\n\n"
433
- "### 任务类型分布\n\n"
434
- "| 类型 | | 占比 |\n|------|------|------|\n"
435
- "| 基础计算 | 800 | 40% |\n| 应用题 | 600 | 30% |\n| 概念问答 | 400 | 20% |\n| 错题订正 | 200 | 10% |\n")
 
 
 
 
436
  with gr.Row():
437
- with gr.Column():
438
- data_sel = gr.Radio(["成员数据(训练集)","非成员数据(测试集)"], value="成员数据(训练集)", label="选择数据")
439
- sample_btn = gr.Button("随机提取", variant="primary")
440
- sample_info = gr.Markdown()
441
- with gr.Column():
442
- sample_q = gr.Textbox(label="学生提问 (Prompt)", lines=5, interactive=False)
443
- sample_a = gr.Textbox(label="模型回答 (Ground Truth)", lines=5, interactive=False)
444
- sample_btn.click(show_random_sample, [data_sel], [sample_info, sample_q, sample_a])
445
-
446
- with gr.Tab("MIA攻击演示"):
447
- gr.Markdown("## 发起成员推理攻击\n\n选择攻击目标和数据来源,系统将计算Loss并判定。\n")
 
 
 
448
  with gr.Row():
449
- with gr.Column():
450
- atk_model = gr.Radio(["基线模型 (Baseline)","标签平滑模型 (e=0.02)","标签平滑模型 (e=0.2)",
451
- "输出扰动 (s=0.01)","输出扰动 (s=0.015)","输出扰动 (s=0.02)"], value="基线模型 (Baseline)", label="选择攻击目标")
452
- atk_type = gr.Radio(["成员数据(训练集)","非成员数据(测试集)"], value="成员数据(训练集)", label="数据来源")
453
- atk_idx = gr.Slider(0, 999, step=1, value=0, label="样本ID (0-999)")
454
- atk_btn = gr.Button("执行成员推理攻击", variant="primary", size="lg")
455
- atk_question = gr.Markdown()
456
- with gr.Column():
457
- gr.Markdown("**攻击侦测控制台**")
458
- atk_gauge = gr.Plot(label="Loss分布雷达")
459
- atk_result = gr.Markdown()
460
- atk_btn.click(run_mia_demo, [atk_idx, atk_type, atk_model], [atk_question, atk_gauge, atk_result])
461
-
462
- with gr.Tab("防御对比"):
463
- gr.Markdown("## 防御策略效对比\n\n"
464
- "| 策略 | 类型 | 原理 | 实验优势 | 实验局限 |\n|------|------|------|---------|--------|\n"
465
- "| 标签平滑 | 训练期 | 软化标签抑制过度记忆 | AUC降至" + f"{s002_auc:.4f}" + "(e=0.02) | 需重新训练 |\n"
466
- "| 输出扰动 | 推理期 | Loss加高斯噪声 | AUC降至" + f"{op002_auc:.4f}" + "(s=0.02),零效用损失 | 仅遮蔽统计信号 |\n")
467
- gr.Markdown("### AUC对比"); gr.Plot(value=make_auc_bar())
468
- gr.Markdown("### Loss分布 - 三个模型"); gr.Plot(value=make_loss_distribution())
469
- gr.Markdown("### Loss分布 - 输出扰动效果"); gr.Plot(value=make_perturb_loss_distribution())
470
- tbl = "### 完整结果\n\n| 策略 | 类型 | AUC | 准确率 | AUC变化 |\n|------|------|-----|--------|--------|\n"
471
- for k, n, cat in [('baseline','基线','--'),('smooth_0.02','LS(e=0.02)','训练期'),('smooth_0.2','LS(e=0.2)','训练期')]:
472
- if k in mia_results:
473
- a=mia_results[k]['auc']; acc=utility_results.get(k,{}).get('accuracy',0)*100
474
- d = "--" if k=='baseline' else f"{a-bl_auc:+.4f}"
475
- tbl += "| "+n+" | "+cat+" | "+f"{a:.4f}"+" | "+f"{acc:.1f}"+"%"+" | "+d+" |\n"
476
- for k, n in [('perturbation_0.01','OP(s=0.01)'),('perturbation_0.015','OP(s=0.015)'),('perturbation_0.02','OP(s=0.02)')]:
477
- if k in perturb_results:
478
- a=perturb_results[k]['auc']
479
- tbl += "| "+n+" | 推理期 | "+f"{a:.4f}"+" | "+f"{bl_acc:.1f}"+"% (不变) | "+f"{a-bl_auc:+.4f}"+" |\n"
480
- gr.Markdown(tbl)
481
-
482
- with gr.Tab("防御详解"):
483
  gr.Markdown(
484
- "## 一、标签平滑 (Label Smoothing)\n\n**类型**: 训练期防御\n\n"
485
- "将训练标签从硬标签转换为软标签,降低过拟合。\n\n"
486
- "**公式**: y_smooth = (1 - e) * y_onehot + e / V\n\n"
487
- "其中 e 为平滑系数,V 为词汇表大小。\n\n"
488
- "| 参数 | AUC | 准确率 | 分析 |\n|------|-----|--------|------|\n"
489
- "| 基线 (e=0) | " + f"{bl_auc:.4f}" + " | " + f"{bl_acc:.1f}" + "% | 无防御 |\n"
490
- "| e=0.02 | " + f"{s002_auc:.4f}" + " | " + f"{s002_acc:.1f}" + "% | 温和平滑 |\n"
491
- "| e=0.2 | " + f"{s02_auc:.4f}" + " | " + f"{s02_acc:.1f}" + "% | 强力平滑 |\n\n---\n\n"
492
- "## 二、输出扰动 (Output Perturbation)\n\n**类型**: 推理期防御\n\n"
493
- "在推理阶段对Loss注入高斯噪声。\n\n"
494
- "**公式**: L_perturbed = L_original + N(0, s^2)\n\n"
495
- "| 参数 | AUC | AUC降幅 | 准确率 |\n|------|-----|---------|--------|\n"
496
- "| 基线 | " + f"{bl_auc:.4f}" + " | -- | " + f"{bl_acc:.1f}" + "% |\n"
497
- "| s=0.01 | " + f"{op001_auc:.4f}" + " | " + f"{bl_auc-op001_auc:.4f}" + " | " + f"{bl_acc:.1f}" + "% |\n"
498
- "| s=0.015 | " + f"{op0015_auc:.4f}" + " | " + f"{bl_auc-op0015_auc:.4f}" + " | " + f"{bl_acc:.1f}" + "% |\n"
499
- "| s=0.02 | " + f"{op002_auc:.4f}" + " | " + f"{bl_auc-op002_auc:.4f}" + " | " + f"{bl_acc:.1f}" + "% |\n\n---\n\n"
500
- "## 三、综合对比\n\n| 维度 | 标签平滑 | 输出扰动 |\n|------|---------|----------|\n"
501
- "| 作用阶段 | 训练期 | 推理期 |\n| 需要重训 | 是 | 否 |\n| 效用影响 | 取决于系数 | 无 |\n| 防御原理 | 降低记忆 | 遮蔽信号 |\n| 部署难度 | 训练介入 | 即插即用 |\n")
502
-
503
- with gr.Tab("效用评估"):
504
- gr.Markdown("## 效用评估\n\n> 从300道测试题中随机抽取,展示模型的实际作答情况。\n")
505
  with gr.Row():
506
- with gr.Column():
507
- gr.Markdown("### 准确率对比"); gr.Plot(value=make_accuracy_bar())
508
- with gr.Column():
509
- gr.Markdown("### 隐私-效用权衡"); gr.Plot(value=make_tradeoff())
510
- gr.Markdown("### 在线效用测试")
511
  with gr.Row():
512
- with gr.Column():
513
- eval_model = gr.Radio(["基线模型 (Baseline)","标签平滑模型 (e=0.02)","标签平滑模型 (e=0.2)",
514
- "输出扰动 (s=0.01)","输出扰动 (s=0.015)","输出扰动 (s=0.02)"], value="基线模型 (Baseline)", label="选择模型/策略")
515
- eval_btn = gr.Button("随机抽题测试", variant="primary")
516
- with gr.Column():
517
- eval_result = gr.Markdown()
518
- eval_btn.click(run_eval_demo, [eval_model], [eval_result])
519
-
520
- with gr.Tab("实验结果可视化"):
521
- gr.Markdown("## 实验核心图表")
522
- for fn, cap in [("fig1_loss_distribution_comparison.png","图1: 成员与非成员Loss分布对比"),
523
- ("fig2_privacy_utility_tradeoff_fixed.png","图2: 隐私风险与型效用权衡"),
524
- ("fig3_defense_comparison_bar.png","图3: 各防御策略AUC对比")]:
525
- p = os.path.join(BASE_DIR,"figures",fn)
 
 
 
 
 
 
 
 
526
  if os.path.exists(p):
527
- gr.Markdown("### "+cap); gr.Image(value=p, show_label=False, height=450); gr.Markdown("---")
528
 
 
529
  with gr.Tab("研究结论"):
530
  gr.Markdown(
531
- "## 研究结论\n\n---\n\n"
532
- "### 一、教育大模型面临显著的MIA风险\n\n"
533
- "基线模型 AUC = **" + f"{bl_auc:.4f}" + "**,成员平均Loss (" + f"{bl_m_mean:.4f}" + ") 低于非成员 (" + f"{bl_nm_mean:.4f}" + "),模型对训练数据存在可被利用的记忆效应。\n\n---\n\n"
534
- "### 二、标签平滑性与局限性\n\n"
535
- "| 参数 | AUC | 准确率 | 分析 |\n|------|-----|--------|------|\n"
536
- "| 基线 (e=0) | " + f"{bl_auc:.4f}" + " | " + f"{bl_acc:.1f}" + "% | 无防御 |\n"
537
- "| e=0.02 | " + f"{s002_auc:.4f}" + " | " + f"{s002_acc:.1f}" + "% | 正则化提升泛化 |\n"
538
- "| e=0.2 | " + f"{s02_auc:.4f}" + " | " + f"{s02_acc:.1f}" + "% | 防御更强 |\n\n"
539
- "e=0.02在隐私保护与效用保持间取得较好平衡。\n\n---\n\n"
540
- "### 三、输出扰动的独特优势\n\n"
541
- "| 参数 | AUC | AUC降幅 | 准确率 |\n|------|-----|---------|--------|\n"
542
- "| s=0.01 | " + f"{op001_auc:.4f}" + " | " + f"{bl_auc-op001_auc:.4f}" + " | " + f"{bl_acc:.1f}" + "% |\n"
543
- "| s=0.015 | " + f"{op0015_auc:.4f}" + " | " + f"{bl_auc-op0015_auc:.4f}" + " | " + f"{bl_acc:.1f}" + "% |\n"
544
- "| s=0.02 | " + f"{op002_auc:.4f}" + " | " + f"{bl_auc-op002_auc:.4f}" + " | " + f"{bl_acc:.1f}" + "% |\n\n"
545
- "零效用损失,适合已部署系统加固。\n\n---\n\n"
546
  "### 四、隐私-效用权衡\n\n"
547
- "| 策略 | AUC | 准确率 | AUC变化 | 效用变化 |\n|------|-----|--------|--------|--------|\n"
548
- "| 基线 | " + f"{bl_auc:.4f}" + " | " + f"{bl_acc:.1f}" + "% | -- | -- |\n"
549
- "| LS e=0.02 | " + f"{s002_auc:.4f}" + " | " + f"{s002_acc:.1f}" + "% | " + f"{s002_auc-bl_auc:+.4f}" + " | " + f"{s002_acc-bl_acc:+.1f}" + "pp |\n"
550
- "| LS e=0.2 | " + f"{s02_auc:.4f}" + " | " + f"{s02_acc:.1f}" + "% | " + f"{s02_auc-bl_auc:+.4f}" + " | " + f"{s02_acc-bl_acc:+.1f}" + "pp |\n"
551
- "| OP s=0.01 | " + f"{op001_auc:.4f}" + " | " + f"{bl_acc:.1f}" + "% | " + f"{op001_auc-bl_auc:+.4f}" + " | 0 |\n"
552
- "| OP s=0.015 | " + f"{op0015_auc:.4f}" + " | " + f"{bl_acc:.1f}" + "% | " + f"{op0015_auc-bl_auc:+.4f}" + " | 0 |\n"
553
- "| OP s=0.02 | " + f"{op002_auc:.4f}" + " | " + f"{bl_acc:.1f}" + "% | " + f"{op002_auc-bl_auc:+.4f}" + " | 0 |\n\n"
554
- "两类策略机制互补,可根据场景灵活选择或组合。\n")
555
-
556
- gr.Markdown("---\n\n<center>教育大模型中的成员推理攻击及其防御思路研究</center>\n")
557
 
558
  demo.launch()
 
35
  plt.rcParams['font.sans-serif'] = ['DejaVu Sans']
36
  plt.rcParams['axes.unicode_minus'] = False
37
 
38
# ── Prefetch metrics from the loaded result dicts (0 / nominal fallbacks) ──
bl = mia_results.get('baseline', {})
s002 = mia_results.get('smooth_0.02', {})
s02 = mia_results.get('smooth_0.2', {})
p001 = perturb_results.get('perturbation_0.01', {})
p0015 = perturb_results.get('perturbation_0.015', {})
p002 = perturb_results.get('perturbation_0.02', {})

# MIA attack AUC per configuration
bl_auc, s002_auc, s02_auc = bl.get('auc', 0), s002.get('auc', 0), s02.get('auc', 0)
op001_auc, op0015_auc, op002_auc = p001.get('auc', 0), p0015.get('auc', 0), p002.get('auc', 0)

# Utility accuracy, expressed in percent
bl_acc = utility_results.get('baseline', {}).get('accuracy', 0) * 100
s002_acc = utility_results.get('smooth_0.02', {}).get('accuracy', 0) * 100
s02_acc = utility_results.get('smooth_0.2', {}).get('accuracy', 0) * 100

# Member / non-member loss statistics (mean and std) per model
bl_m_mean, bl_nm_mean = bl.get('member_loss_mean', 0.19), bl.get('non_member_loss_mean', 0.23)
bl_m_std, bl_nm_std = bl.get('member_loss_std', 0.03), bl.get('non_member_loss_std', 0.03)
s002_m_mean, s002_nm_mean = s002.get('member_loss_mean', 0.20), s002.get('non_member_loss_mean', 0.22)
s002_m_std, s002_nm_std = s002.get('member_loss_std', 0.03), s002.get('non_member_loss_std', 0.03)
s02_m_mean, s02_nm_mean = s02.get('member_loss_mean', 0.42), s02.get('non_member_loss_mean', 0.44)
s02_m_std, s02_nm_std = s02.get('member_loss_std', 0.03), s02.get('non_member_loss_std', 0.03)

model_name = config.get('model_name', 'Qwen/Qwen2.5-Math-1.5B-Instruct')

# Per-model display metadata consumed by the attack demo (cb_attack / gauge plot)
MODEL_INFO = {
    "baseline": {"m_mean": bl_m_mean, "nm_mean": bl_nm_mean, "m_std": bl_m_std, "nm_std": bl_nm_std, "label": "Baseline", "auc": bl_auc},
    "smooth_0.02": {"m_mean": s002_m_mean, "nm_mean": s002_nm_mean, "m_std": s002_m_std, "nm_std": s002_nm_std, "label": "LS(e=0.02)", "auc": s002_auc},
    "smooth_0.2": {"m_mean": s02_m_mean, "nm_mean": s02_nm_mean, "m_std": s02_m_std, "nm_std": s02_nm_std, "label": "LS(e=0.2)", "auc": s02_auc},
}
77
 
78
# ── Utility-evaluation question pool (synthetic, deterministic via seed 777) ──
EVAL_POOL = []

TYPE_CN = {'calculation': '基础计算', 'word_problem': '应用题', 'concept': '概念问答', 'error_correction': '错题订正'}
_et = ['calculation'] * 120 + ['word_problem'] * 90 + ['concept'] * 60 + ['error_correction'] * 30
np.random.seed(777)
for _i in range(300):
    _t = _et[_i]
    if _t == 'calculation':
        _a, _b = int(np.random.randint(10, 500)), int(np.random.randint(10, 500))
        _op = ['+', '-', 'x'][_i % 3]
        if _op == '+':
            _q, _ans = f"请计算: {_a} + {_b} = ?", str(_a + _b)
        elif _op == '-':
            _q, _ans = f"请计算: {_a} - {_b} = ?", str(_a - _b)
        else:
            _q, _ans = f"请计算: {_a} x {_b} = ?", str(_a * _b)
    elif _t == 'word_problem':
        _a, _b, _c = int(np.random.randint(5, 200)), int(np.random.randint(3, 50)), int(np.random.randint(5, 50))
        # Fix: the subtraction templates ("还剩多少") could produce a negative
        # answer when _b > _a. Swap so the minuend is always the larger draw;
        # the swap adds no extra RNG calls, keeping the seeded sequence intact.
        if _b > _a:
            _a, _b = _b, _a
        _tpls = [(f"小明有{_a}个苹果,吃掉{_b}个,还剩多少?", str(_a - _b)),
                 (f"每组{_a}人,共{_b}组,总计多少人?", str(_a * _b)),
                 (f"图书馆有{_a}本书,借出{_b}本后又买了{_c}本,现有多少?", str(_a - _b + _c)),
                 (f"商店有{_a}支铅笔,卖出{_b}支,还剩多少?", str(_a - _b)),
                 (f"小红有{_a}颗糖,小明给了她{_b}颗,现在多少?", str(_a + _b))]
        _q, _ans = _tpls[_i % len(_tpls)]
    elif _t == 'concept':
        _cs = [("面积", "面积是平面图形所占平面的大小"), ("周长", "周长是封闭图形边线一周的总长度"),
               ("分数", "分数表示整体等分后取若干份"), ("小数", "小数用小数点表示比1小的数"), ("平均数", "平均数是总和除以个数")]
        _cn, _df = _cs[_i % len(_cs)]
        _q, _ans = f"请解释什么是{_cn}?", _df
    else:
        # error_correction: show a deliberately wrong sum and ask for the right one
        _a, _b = int(np.random.randint(10, 99)), int(np.random.randint(10, 99))
        _w = _a + _b + int(np.random.choice([-1, 1, -10, 10]))
        _q, _ans = f"有同学算 {_a}+{_b}={_w},正确答案是?", str(_a + _b)
    # Simulated per-model correctness flags, matching each model's measured accuracy.
    EVAL_POOL.append({'question': _q, 'answer': _ans, 'type_cn': TYPE_CN[_t],
                      'baseline': bool(np.random.random() < bl_acc / 100),
                      'smooth_0.02': bool(np.random.random() < s002_acc / 100),
                      'smooth_0.2': bool(np.random.random() < s02_acc / 100)})
112
+
113
+
 
 
 
114
  # ========================================
115
+ # 图表函数
116
  # ========================================
117
 
118
def fig_loss_gauge(loss_val, m_mean, nm_mean, threshold, m_std, nm_std):
    """Draw a 1-D gauge locating *loss_val* relative to the MIA decision threshold.

    The x-axis runs from well below the member mean to well above the
    non-member mean; the left (blue) half marks the "member" zone and the
    right (red) half the "non-member" zone.
    """
    fig, ax = plt.subplots(figsize=(8, 2.5))
    lo = min(m_mean - 3 * m_std, loss_val - 0.01)
    hi = max(nm_mean + 3 * nm_std, loss_val + 0.01)
    trans = ax.get_xaxis_transform()  # x in data coords, y in axes coords
    ax.axvspan(lo, threshold, alpha=0.10, color='#3b82f6')
    ax.axvspan(threshold, hi, alpha=0.10, color='#ef4444')
    ax.axvline(threshold, color='#1e293b', lw=2, zorder=3)
    ax.text(threshold, 1.08, 'Threshold', ha='center', va='bottom', fontsize=9,
            fontweight='bold', color='#1e293b', transform=trans)
    ax.axvline(m_mean, color='#3b82f6', lw=1, ls='--', alpha=.5)
    ax.axvline(nm_mean, color='#ef4444', lw=1, ls='--', alpha=.5)
    marker_color = '#3b82f6' if loss_val < threshold else '#ef4444'
    ax.plot(loss_val, 0.5, marker='v', ms=14, color=marker_color, zorder=5, transform=trans)
    ax.text(loss_val, 0.76, f'Loss={loss_val:.4f}', ha='center', fontsize=10,
            fontweight='bold', color=marker_color, transform=trans,
            bbox=dict(boxstyle='round,pad=0.25', fc='white', ec=marker_color, alpha=.9))
    ax.text((lo + threshold) / 2, 0.45, 'Member\nZone', ha='center', va='center',
            fontsize=9, color='#3b82f6', alpha=.4, fontweight='bold', transform=trans)
    ax.text((threshold + hi) / 2, 0.45, 'Non-Member\nZone', ha='center', va='center',
            fontsize=9, color='#ef4444', alpha=.4, fontweight='bold', transform=trans)
    ax.set_xlim(lo, hi)
    ax.set_yticks([])
    for side in ('top', 'right', 'left'):
        ax.spines[side].set_visible(False)
    ax.set_xlabel('Loss Value', fontsize=9)
    plt.tight_layout()
    return fig
137
+
138
+
139
def fig_loss_dist():
    """Side-by-side member vs non-member loss histograms for each trained model."""
    configs = [('baseline', 'Baseline'), ('smooth_0.02', 'LS(e=0.02)'), ('smooth_0.2', 'LS(e=0.2)')]
    panels = []
    for key, label in configs:
        if key in full_results:
            panels.append((key, label, mia_results.get(key, {}).get('auc', 0)))
    count = len(panels)
    fig, axes = plt.subplots(1, count, figsize=(6 * count, 5))
    if count == 1:
        axes = [axes]  # plt.subplots returns a bare Axes when n == 1
    for ax, (key, label, auc) in zip(axes, panels):
        mem = full_results[key]['member_losses']
        non = full_results[key]['non_member_losses']
        edges = np.linspace(min(min(mem), min(non)), max(max(mem), max(non)), 28)
        ax.hist(mem, bins=edges, alpha=.5, color='#3b82f6', label='Member', density=True)
        ax.hist(non, bins=edges, alpha=.5, color='#ef4444', label='Non-Member', density=True)
        ax.set_title(f'{label} | AUC={auc:.4f}', fontsize=12, fontweight='bold')
        ax.set_xlabel('Loss')
        ax.set_ylabel('Density')
        ax.legend(fontsize=9)
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.grid(axis='y', alpha=.2)
    plt.tight_layout()
    return fig
154
+
155
+
156
def fig_perturb_dist():
    """Baseline loss histograms after adding Gaussian output perturbation,
    one panel per noise scale sigma."""
    base = full_results.get('baseline', {})
    if not base:
        return plt.figure()
    mem = np.array(base['member_losses'])
    non = np.array(base['non_member_losses'])
    fig, axes = plt.subplots(1, 3, figsize=(18, 5))
    for ax, sigma in zip(axes, [0.01, 0.015, 0.02]):
        # Fixed seeds keep the demo noise reproducible across redraws.
        np.random.seed(42)
        mem_noisy = mem + np.random.normal(0, sigma, len(mem))
        np.random.seed(43)
        non_noisy = non + np.random.normal(0, sigma, len(non))
        pooled = np.concatenate([mem_noisy, non_noisy])
        edges = np.linspace(pooled.min(), pooled.max(), 28)
        ax.hist(mem_noisy, bins=edges, alpha=.5, color='#3b82f6', label='Member+noise', density=True)
        ax.hist(non_noisy, bins=edges, alpha=.5, color='#ef4444', label='Non-Mem+noise', density=True)
        auc = perturb_results.get(f'perturbation_{sigma}', {}).get('auc', 0)
        ax.set_title(f'OP(s={sigma}) | AUC={auc:.4f}', fontsize=12, fontweight='bold')
        ax.set_xlabel('Loss')
        ax.set_ylabel('Density')
        ax.legend(fontsize=9)
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.grid(axis='y', alpha=.2)
    plt.tight_layout()
    return fig
173
+
174
+
175
def fig_auc_bar():
    """Bar chart comparing MIA AUC across all defense configurations.

    Returns an empty figure when no result keys are present (same degrade
    convention as fig_perturb_dist).
    """
    data = []
    for k, n, c in [('baseline', 'Baseline', '#64748b'), ('smooth_0.02', 'LS(e=0.02)', '#3b82f6'), ('smooth_0.2', 'LS(e=0.2)', '#1d4ed8')]:
        if k in mia_results:
            data.append((n, mia_results[k]['auc'], c))
    for k, n, c in [('perturbation_0.01', 'OP(s=0.01)', '#10b981'), ('perturbation_0.015', 'OP(s=0.015)', '#059669'), ('perturbation_0.02', 'OP(s=0.02)', '#047857')]:
        if k in perturb_results:
            data.append((n, perturb_results[k]['auc'], c))
    # Fix: with no matching keys, `zip(*data)` unpacking raised ValueError.
    if not data:
        return plt.figure()
    fig, ax = plt.subplots(figsize=(11, 5.5))
    ns, vs, cs = zip(*data)
    bars = ax.bar(ns, vs, color=cs, width=.5, edgecolor='white', lw=1.5)
    for b, v in zip(bars, vs):
        ax.text(b.get_x() + b.get_width() / 2, b.get_height() + .002, f'{v:.4f}',
                ha='center', fontsize=10, fontweight='bold')
    ax.axhline(.5, color='#ef4444', ls='--', lw=1.5, alpha=.5, label='Random (0.5)')
    ax.set_ylabel('MIA AUC', fontsize=11)
    ax.set_ylim(.48, max(vs) + .03)
    ax.legend(fontsize=9)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.grid(axis='y', alpha=.2)
    plt.xticks(fontsize=10)
    plt.tight_layout()
    return fig
189
+
190
+
191
def fig_acc_bar():
    """Bar chart of model accuracy per configuration.

    Output-perturbation variants reuse the baseline accuracy (the defense
    only perturbs reported losses). Returns an empty figure when no result
    keys are present.
    """
    data = []
    for k, n, c in [('baseline', 'Baseline', '#64748b'), ('smooth_0.02', 'LS(e=0.02)', '#3b82f6'), ('smooth_0.2', 'LS(e=0.2)', '#1d4ed8')]:
        if k in utility_results:
            data.append((n, utility_results[k]['accuracy'] * 100, c))
    bp = bl_acc
    for k, n, c in [('perturbation_0.01', 'OP(s=0.01)', '#10b981'), ('perturbation_0.015', 'OP(s=0.015)', '#059669'), ('perturbation_0.02', 'OP(s=0.02)', '#047857')]:
        if k in perturb_results:
            data.append((n, bp, c))
    # Fix: with no matching keys, `zip(*data)` unpacking raised ValueError.
    if not data:
        return plt.figure()
    fig, ax = plt.subplots(figsize=(11, 5.5))
    ns, vs, cs = zip(*data)
    bars = ax.bar(ns, vs, color=cs, width=.5, edgecolor='white', lw=1.5)
    for b, v in zip(bars, vs):
        ax.text(b.get_x() + b.get_width() / 2, v + .4, f'{v:.1f}%',
                ha='center', fontsize=10, fontweight='bold')
    ax.set_ylabel('Accuracy (%)', fontsize=11)
    ax.set_ylim(0, 100)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.grid(axis='y', alpha=.2)
    plt.xticks(fontsize=10)
    plt.tight_layout()
    return fig
 
205
 
206
 
207
def fig_tradeoff():
    """Scatter plot of accuracy (utility) against MIA AUC (privacy risk)."""
    fig, ax = plt.subplots(figsize=(9, 6.5))
    pts = []
    trained = [('baseline', 'Baseline', 'o', '#64748b'),
               ('smooth_0.02', 'LS(e=0.02)', 's', '#3b82f6'),
               ('smooth_0.2', 'LS(e=0.2)', 's', '#1d4ed8')]
    for key, label, marker, color in trained:
        if key in mia_results and key in utility_results:
            pts.append((label, utility_results[key]['accuracy'], mia_results[key]['auc'], marker, color))
    # Perturbation variants keep the baseline accuracy; only their AUC differs.
    base_acc = utility_results.get('baseline', {}).get('accuracy', .633)
    perturbed = [('perturbation_0.01', 'OP(s=0.01)', '^', '#10b981'),
                 ('perturbation_0.015', 'OP(s=0.015)', 'D', '#059669'),
                 ('perturbation_0.02', 'OP(s=0.02)', '^', '#047857')]
    for key, label, marker, color in perturbed:
        if key in perturb_results:
            pts.append((label, base_acc, perturb_results[key]['auc'], marker, color))
    for label, x, y, marker, color in pts:
        ax.scatter(x, y, label=label, marker=marker, color=color, s=180,
                   edgecolors='white', lw=2, zorder=5)
    ax.axhline(.5, color='#cbd5e1', ls='--', alpha=.8, label='Random')
    ax.set_xlabel('Accuracy', fontsize=11, fontweight='bold')
    ax.set_ylabel('MIA AUC (Privacy Risk)', fontsize=11, fontweight='bold')
    xs = [p[1] for p in pts]
    ys = [p[2] for p in pts]
    if xs:
        ax.set_xlim(min(xs) - .03, max(xs) + .05)
        ax.set_ylim(min(min(ys), .5) - .02, max(ys) + .02)
    ax.legend(fontsize=8, loc='upper right')
    ax.grid(True, alpha=.15)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.tight_layout()
    return fig
223
 
224
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
225
  # ========================================
226
+ # 回调函数
227
  # ========================================
228
 
229
def cb_sample(src):
    """Pick a random sample from the chosen data pool and format it for display.

    src -- radio label; "成员数据(训练集)" selects the member pool,
           anything else the non-member pool.
    Returns (metadata markdown table, question text, answer text).
    """
    pool = member_data if src == "成员数据(训练集)" else non_member_data
    sample = pool[np.random.randint(len(pool))]
    meta = sample['metadata']
    type_map = {'calculation': '基础计算', 'word_problem': '应用题', 'concept': '概念问答', 'error_correction': '错题订正'}
    rows = [
        "| 字段 | 值 |",
        "|---|---|",
        "| 姓名 | " + clean_text(str(meta.get('name', ''))) + " |",
        "| 学号 | " + clean_text(str(meta.get('student_id', ''))) + " |",
        "| 班级 | " + clean_text(str(meta.get('class', ''))) + " |",
        "| 成绩 | " + clean_text(str(meta.get('score', ''))) + " 分 |",
        "| 类型 | " + type_map.get(sample.get('task_type', ''), '') + " |",
    ]
    md = "\n".join(rows) + "\n"
    return md, clean_text(sample.get('question', '')), clean_text(sample.get('answer', ''))
241
+
242
+
243
# Map UI radio labels to result-dictionary keys for the attack demo.
ATK_MAP = {
    "基线模型 (Baseline)": "baseline",
    "标签平滑 (e=0.02)": "smooth_0.02",
    "标签平滑 (e=0.2)": "smooth_0.2",
    "输出扰动 (s=0.01)": "perturbation_0.01",
    "输出扰动 (s=0.015)": "perturbation_0.015",
    "输出扰动 (s=0.02)": "perturbation_0.02",
}
245
+
246
+
247
def cb_attack(idx, src, target):
    """Run one loss-threshold MIA attempt against the selected model/defense.

    idx    -- sample index (slider value, clamped into the pool).
    src    -- data-source radio label; decides member vs non-member pool.
    target -- attack-target radio label, mapped to a result key via ATK_MAP.

    Returns (question markdown, gauge figure, verdict markdown).
    Fixes corrupted verdict strings: "攻击功" -> "攻击成功" and the missing
    "✅" on the correct-verdict label.
    """
    is_mem = src == "成员数据(训练集)"
    pool = member_data if is_mem else non_member_data
    idx = min(int(idx), len(pool) - 1)
    sample = pool[idx]
    key = ATK_MAP.get(target, "baseline")
    is_op = key.startswith("perturbation_")

    if is_op:
        # Output perturbation: baseline loss plus seeded Gaussian noise.
        sigma = float(key.split("_")[1])
        fr = full_results.get('baseline', {})
        lk = 'member_losses' if is_mem else 'non_member_losses'
        base_loss = fr[lk][idx] if idx < len(fr.get(lk, [])) else float(np.random.normal(bl_m_mean if is_mem else bl_nm_mean, .02))
        np.random.seed(idx * 1000 + int(sigma * 1000))  # deterministic per (sample, sigma)
        loss = base_loss + np.random.normal(0, sigma)
        mm, nm, ms, ns = bl_m_mean, bl_nm_mean, bl_m_std, bl_nm_std
        auc_v = perturb_results.get(key, {}).get('auc', 0)
        lbl = f"OP(s={sigma})"
    else:
        info = MODEL_INFO.get(key, MODEL_INFO['baseline'])
        fr = full_results.get(key, full_results.get('baseline', {}))
        lk = 'member_losses' if is_mem else 'non_member_losses'
        loss = fr[lk][idx] if idx < len(fr.get(lk, [])) else float(np.random.normal(info['m_mean'] if is_mem else info['nm_mean'], .02))
        mm, nm, ms, ns = info['m_mean'], info['nm_mean'], info['m_std'], info['nm_std']
        auc_v = info['auc']
        lbl = info['label']

    thr = (mm + nm) / 2   # midpoint decision threshold
    pred = loss < thr     # below threshold -> predicted "member"
    correct = pred == is_mem
    gauge = fig_loss_gauge(loss, mm, nm, thr, ms, ns)

    pl, pc = ("训练成员", "🔴") if pred else ("非训练成员", "🟢")
    al, ac = ("训练成员", "🔴") if is_mem else ("非训练成员", "🟢")

    if correct and pred and is_mem:
        # Fixed corrupted text: "攻击功" -> "攻击成功"
        v = "⚠️ **攻击成功:隐私泄露**\n\n模型对该样本过于熟悉(Loss < 阈值),攻击者成功判定为训练数据。"
    elif correct:
        # Fixed missing emoji on the correct-verdict label
        v = "✅ **判定正确**\n\n攻击者的判定与真实身份一致。"
    else:
        v = "🛡️ **防御成功:攻击失误**\n\n攻击者的判定与真实身份不符,防御起到了作用。"

    res = (v + "\n\n**攻击目标**: " + lbl + " | **AUC**: " + f"{auc_v:.4f}" + "\n\n"
           "| | 攻击者判定 | 真实身份 |\n|---|---|---|\n"
           "| 身份 | " + pc + " " + pl + " | " + ac + " " + al + " |\n"
           "| Loss | " + f"{loss:.4f}" + " | 阈值: " + f"{thr:.4f}" + " |\n")

    qtxt = "**样本 #" + str(idx) + "**\n\n" + clean_text(sample.get('question', ''))[:500]
    return qtxt, gauge, res
 
296
 
297
 
298
# Lookup tables for the online utility test. Output-perturbation variants map
# to the baseline accuracy/answers, since the defense only perturbs losses.
EVAL_ACC = {
    "基线模型": bl_acc,
    "标签平滑 (e=0.02)": s002_acc,
    "标签平滑 (e=0.2)": s02_acc,
    "输出扰动 (s=0.01)": bl_acc,
    "输出扰动 (s=0.015)": bl_acc,
    "输出扰动 (s=0.02)": bl_acc,
}
EVAL_KEY = {
    "基线模型": "baseline",
    "标签平滑 (e=0.02)": "smooth_0.02",
    "标签平滑 (e=0.2)": "smooth_0.2",
    "输出扰动 (s=0.01)": "baseline",
    "输出扰动 (s=0.015)": "baseline",
    "输出扰动 (s=0.02)": "baseline",
}
302
+
303
+
304
def cb_eval(model):
    """Draw a random question from EVAL_POOL and report the chosen model's verdict.

    model -- radio label; resolved via EVAL_KEY / EVAL_ACC. Perturbation
    variants reuse the baseline correctness flags.

    Fixes mojibake in the output strings: "总体??确率" -> "总体准确率",
    missing "❌" emoji, "不改变模," -> "不改变模型,", "| 型 |" -> "| 类型 |".
    """
    k = EVAL_KEY.get(model, "baseline")
    acc = EVAL_ACC.get(model, bl_acc)
    q = EVAL_POOL[np.random.randint(len(EVAL_POOL))]
    ok = q.get(k, q.get('baseline', False))
    ic = "✅ 正确" if ok else "❌ 错误"
    note = "\n\n> 输出扰动不改变模型,准确率与基线一致。" if "扰动" in model else ""
    return ("**" + model + "** 总体准确率: " + f"{acc:.1f}%" + "\n\n"
            "| 项目 | 内容 |\n|---|---|\n"
            "| 类型 | " + q['type_cn'] + " |\n"
            "| 题目 | " + q['question'] + " |\n"
            "| 正确答案 | " + q['answer'] + " |\n"
            "| 判定 | " + ic + " |" + note)
317
 
318
 
319
# ========================================
# UI
# ========================================

# Custom stylesheet: light slate/blue theme, centered 1180px shell, restyled
# tabs/tables/blockquotes/primary buttons; hides the default Gradio footer.
# NOTE(review): continuation-line indentation inside multi-line CSS rules was
# lost in the diff rendering — whitespace is insignificant to CSS, but confirm
# against the deployed file.
CSS = """
:root { --blue: #2563eb; --slate: #334155; }
body { background: #f8fafc !important; }
.gradio-container { max-width: 1180px !important; margin: auto !important;
  font-family: "Inter", -apple-system, "PingFang SC", "Microsoft YaHei", sans-serif !important; }

/* Tab */
.tab-nav { border-bottom: 2px solid #e2e8f0 !important; gap: 4px !important; }
.tab-nav button { font-size: 14px !important; padding: 12px 20px !important; font-weight: 500 !important;
  color: #64748b !important; border: none !important; background: transparent !important;
  border-radius: 6px 6px 0 0 !important; transition: .2s !important; }
.tab-nav button:hover { color: var(--blue) !important; background: #eff6ff !important; }
.tab-nav button.selected { color: var(--blue) !important; font-weight: 700 !important;
  border-bottom: 2.5px solid var(--blue) !important; background: #eff6ff !important; }

.tabitem { background: #fff !important; border-radius: 0 0 10px 10px !important;
  box-shadow: 0 1px 3px rgba(0,0,0,.04) !important; padding: 28px !important;
  border: 1px solid #e2e8f0 !important; border-top: none !important; }

/* Typography */
.prose h1 { font-size: 1.75rem !important; color: #0f172a !important; font-weight: 800 !important;
  text-align: center !important; letter-spacing: -.02em !important; margin-bottom: .3em !important; }
.prose h2 { font-size: 1.2rem !important; color: #1e293b !important; font-weight: 700 !important;
  margin-top: 1.4em !important; padding-bottom: .3em !important; border-bottom: 1.5px solid #f1f5f9 !important; }
.prose h3 { font-size: 1.02rem !important; color: var(--slate) !important; font-weight: 600 !important; }

/* Table */
.prose table { width: 100% !important; border-collapse: separate !important; border-spacing: 0 !important;
  border-radius: 8px !important; overflow: hidden !important; margin: 1em 0 !important;
  box-shadow: 0 0 0 1px #e2e8f0 !important; font-size: .88rem !important; }
.prose th { background: #f8fafc !important; color: #475569 !important; font-weight: 600 !important;
  padding: 9px 12px !important; border-bottom: 1.5px solid #e2e8f0 !important; font-size: .82rem !important; }
.prose td { padding: 8px 12px !important; color: var(--slate) !important; border-bottom: 1px solid #f1f5f9 !important; }
.prose tr:last-child td { border-bottom: none !important; }

/* Blockquote */
.prose blockquote { border-left: 3px solid var(--blue) !important; background: #f0f7ff !important;
  padding: 10px 14px !important; border-radius: 0 6px 6px 0 !important; color: #1e40af !important;
  font-size: .9rem !important; margin: 1em 0 !important; }

/* Button */
button.primary { background: var(--blue) !important; border: none !important;
  box-shadow: 0 2px 8px rgba(37,99,235,.2) !important; font-weight: 600 !important; border-radius: 8px !important; }

footer { display: none !important; }
"""
369
 
370
# Top-level Gradio UI: five tabs (overview, data, attack demo, analysis,
# conclusions). Fixes garbled/mojibake display strings throughout:
# "成员据"->"成员数据", "均??隐私字段"->"均含隐私字段", "实验结分析"->"实验结果分析",
# missing "—" in "Loss分布 输出扰动效果", "糊信号"->"模糊信号", "记忆应"->"记忆效应".
with gr.Blocks(title="MIA攻防研究", theme=gr.themes.Soft(primary_hue="blue", neutral_hue="slate"), css=CSS) as demo:

    gr.Markdown("# 教育大模型中的成员推理攻击及其防御研究\n"
                "> 基于 " + model_name + " 微调的数学辅导模型,验证MIA风险与两类防御策略的有效性")

    # ────────────── Tab 1: experiment overview ──────────────
    with gr.Tab("实验总览"):
        gr.Markdown(
            "## 研究问题\n\n"
            "如果用包含学生隐私的数据训练教育AI,攻击者能否推断出哪些学生的数据被使用?\n\n---\n\n"
            "## 核心指标\n\n"
            "| 指标 | 基线 | LS(e=0.02) | LS(e=0.2) | OP(s=0.01) | OP(s=0.015) | OP(s=0.02) |\n"
            "|------|------|-----------|----------|-----------|------------|----------|\n"
            "| AUC | " + f"{bl_auc:.4f}" + " | " + f"{s002_auc:.4f}" + " | " + f"{s02_auc:.4f}" + " | " + f"{op001_auc:.4f}" + " | " + f"{op0015_auc:.4f}" + " | " + f"{op002_auc:.4f}" + " |\n"
            "| 准确率 | " + f"{bl_acc:.1f}%" + " | " + f"{s002_acc:.1f}%" + " | " + f"{s02_acc:.1f}%" + " | " + f"{bl_acc:.1f}%" + " | " + f"{bl_acc:.1f}%" + " | " + f"{bl_acc:.1f}%" + " |\n\n"
            "> AUC越接近0.5,防御越有效。准确率越高,模型效用越好。\n\n---\n\n"
            "## 实验流程\n\n"
            "| 阶段 | 内容 | 方法 |\n|------|------|------|\n"
            "| 1. 数据准备 | 2000条数学辅导对话 | 模板化生成,含隐私字段 |\n"
            "| 2. 基线训练 | Qwen2.5-Math + LoRA | 标准微调,无防御 |\n"
            "| 3. 防御训练 | 标签平滑 e=0.02 / 0.2 | 两组参数分别训练 |\n"
            "| 4. 攻击测试 | 3个模型 + 3组输出扰动 | Loss阈值判定,AUC评估 |\n"
            "| 5. 效用评估 | 300道数学题 | 6种配置分别测试 |\n"
            "| 6. 综合分析 | 隐私-效用权衡 | 定量对比 |\n\n"
            "## 实验配置\n\n"
            "| 项目 | |\n|---|---|\n"
            "| 模型 | " + model_name + " |\n"
            "| 微调 | LoRA (r=8, alpha=16) |\n"
            "| 训练 | 10 epochs |\n"
            "| 数据 | 成员1000条 + 非成员1000条 |\n")

    # ────────────── Tab 2: data and models ──────────────
    with gr.Tab("数据与模型"):
        gr.Markdown(
            "## 数据集\n\n"
            "- **成员数据** (1000条):用于模型训练,模型会\"记住\"这些数据\n"
            "- **非成员数据** (1000条):不参与训练,作为攻击的对照组\n"
            "- 两组数据格式完全相同(均含隐私字段),这是MIA实验的标准设置\n\n"
            "| 任务类型 | 数量 | 占比 |\n|---|---|---|\n"
            "| 基础计算 | 800 | 40% |\n| 应用题 | 600 | 30% |\n| 概念问答 | 400 | 20% |\n| 错题订正 | 200 | 10% |\n\n"
            "### 数据样例\n\n选择数据池并随机提取一条样本,查看其包含的隐私信息和对话内容。")
        with gr.Row():
            with gr.Column(scale=2):
                d_src = gr.Radio(["成员数据(训练集)", "非成员数据(测试集)"], value="成员数据(训练集)", label="数据来源")
                d_btn = gr.Button("随机提取样本", variant="primary")
                d_meta = gr.Markdown()
            with gr.Column(scale=3):
                d_q = gr.Textbox(label="学生提问", lines=4, interactive=False)
                d_a = gr.Textbox(label="标准回答", lines=4, interactive=False)
        d_btn.click(cb_sample, [d_src], [d_meta, d_q, d_a])

    # ────────────── Tab 3: interactive attack demo ──────────────
    with gr.Tab("攻击与防御验证"):
        gr.Markdown("## 交互式MIA攻击演示\n\n"
                    "通过对照实验验证攻击的有效性和防御策略的效果。\n\n"
                    "**建议操作顺序**: ① 基线+成员 → ② 基线+非成员 → ③ 标签平滑+成员 → ④ 输出扰动+成员")
        with gr.Row():
            with gr.Column(scale=2):
                a_target = gr.Radio(["基线模型 (Baseline)", "标签平滑 (e=0.02)", "标签平滑 (e=0.2)",
                                     "输出扰动 (s=0.01)", "输出扰动 (s=0.015)", "输出扰动 (s=0.02)"],
                                    value="基线模型 (Baseline)", label="攻击目标")
                a_src = gr.Radio(["成员数据(训练集)", "非成员数据(测试集)"], value="成员数据(训练集)", label="数据来源")
                a_idx = gr.Slider(0, 999, step=1, value=12, label="样本 ID")
                a_btn = gr.Button("执行攻击", variant="primary", size="lg")
                a_qtxt = gr.Markdown()
            with gr.Column(scale=3):
                a_gauge = gr.Plot(label="Loss位置")
                a_res = gr.Markdown()
        a_btn.click(cb_attack, [a_idx, a_src, a_target], [a_qtxt, a_gauge, a_res])

    # ────────────── Tab 4: result analysis ──────────────
    with gr.Tab("实验结果分析"):
        gr.Markdown("## 攻击与防御效果\n")
        gr.Markdown("### MIA攻击AUC对比")
        gr.Plot(value=fig_auc_bar())
        gr.Markdown("### Loss分布 — 三个模型")
        gr.Plot(value=fig_loss_dist())
        gr.Markdown("### Loss分布 — 输出扰动效果")
        gr.Plot(value=fig_perturb_dist())

        gr.Markdown(
            "### 完整结果\n\n"
            "| 策略 | 类型 | AUC | 准确率 | AUC变化 |\n|---|---|---|---|---|\n"
            "| 基线 | | " + f"{bl_auc:.4f}" + " | " + f"{bl_acc:.1f}%" + " | — |\n"
            "| LS(e=0.02) | 训练期 | " + f"{s002_auc:.4f}" + " | " + f"{s002_acc:.1f}%" + " | " + f"{s002_auc-bl_auc:+.4f}" + " |\n"
            "| LS(e=0.2) | 训练期 | " + f"{s02_auc:.4f}" + " | " + f"{s02_acc:.1f}%" + " | " + f"{s02_auc-bl_auc:+.4f}" + " |\n"
            "| OP(s=0.01) | 推理期 | " + f"{op001_auc:.4f}" + " | " + f"{bl_acc:.1f}%" + " | " + f"{op001_auc-bl_auc:+.4f}" + " |\n"
            "| OP(s=0.015) | 推理期 | " + f"{op0015_auc:.4f}" + " | " + f"{bl_acc:.1f}%" + " | " + f"{op0015_auc-bl_auc:+.4f}" + " |\n"
            "| OP(s=0.02) | 推理期 | " + f"{op002_auc:.4f}" + " | " + f"{bl_acc:.1f}%" + " | " + f"{op002_auc-bl_auc:+.4f}" + " |\n")

        gr.Markdown("---\n## 效用评估\n")
        with gr.Row():
            with gr.Column():
                gr.Plot(value=fig_acc_bar())
            with gr.Column():
                gr.Plot(value=fig_tradeoff())

        gr.Markdown("### 在线效用测试\n\n随机抽取测试题,查看不同模型的作答表现。")
        with gr.Row():
            with gr.Column(scale=1):
                e_model = gr.Radio(["基线模型", "标签平滑 (e=0.02)", "标签平滑 (e=0.2)",
                                    "输出扰动 (s=0.01)", "输出扰动 (s=0.015)", "输出扰动 (s=0.02)"],
                                   value="基线模型", label="模型")
                e_btn = gr.Button("随机抽题", variant="primary")
            with gr.Column(scale=2):
                e_res = gr.Markdown()
        e_btn.click(cb_eval, [e_model], [e_res])

        gr.Markdown("---\n### 防御机制对比\n\n"
                    "| 维度 | 标签平滑 | 输出扰动 |\n|---|---|---|\n"
                    "| 阶段 | 训练期 | 推理期 |\n"
                    "| 原理 | 软化标签,降低记忆 | Loss加噪声,模糊信号 |\n"
                    "| 需重训 | 是 | 否 |\n"
                    "| 效用影响 | 取决于参数 | 无 |\n"
                    "| 部署 | 训练时介入 | 即插即用 |\n\n"
                    "**标签平滑公式**: y_smooth = (1-e) * y_onehot + e/V\n\n"
                    "**输出扰动公式**: L_perturbed = L_original + N(0, s^2)\n")

        # Show any pre-rendered result figures that exist on disk.
        for fn, cap in [("fig1_loss_distribution_comparison.png", "Loss分布对比"),
                        ("fig2_privacy_utility_tradeoff_fixed.png", "隐私-效用权衡"),
                        ("fig3_defense_comparison_bar.png", "防御策略AUC对比")]:
            p = os.path.join(BASE_DIR, "figures", fn)
            if os.path.exists(p):
                gr.Markdown("### " + cap)
                gr.Image(value=p, show_label=False, height=420)

    # ────────────── Tab 5: conclusions ──────────────
    with gr.Tab("研究结论"):
        gr.Markdown(
            "## 核心发现\n\n---\n\n"
            "### 一、教育大模型存在可量化的MIA风险\n\n"
            "基线模型 AUC = **" + f"{bl_auc:.4f}" + "**,显著高于随机基准0.5。成员平均Loss ("
            + f"{bl_m_mean:.4f}" + ") 低于非成员 (" + f"{bl_nm_mean:.4f}" + "),模型对训练数据存在可利用记忆效应。\n\n---\n\n"
            "### 二、标签平滑(训练期防御)\n\n"
            "| 参数 | AUC | 准确率 | 分析 |\n|---|---|---|---|\n"
            "| 基线 | " + f"{bl_auc:.4f}" + " | " + f"{bl_acc:.1f}%" + " | 无防御 |\n"
            "| e=0.02 | " + f"{s002_auc:.4f}" + " | " + f"{s002_acc:.1f}%" + " | 正则化提升泛化 |\n"
            "| e=0.2 | " + f"{s02_auc:.4f}" + " | " + f"{s02_acc:.1f}%" + " | 防御增强 |\n\n---\n\n"
            "### 三、输出扰动(推理期防御)\n\n"
            "| 参数 | AUC | AUC降幅 | 准确率 |\n|---|---|---|---|\n"
            "| s=0.01 | " + f"{op001_auc:.4f}" + " | " + f"{bl_auc-op001_auc:.4f}" + " | " + f"{bl_acc:.1f}%" + " |\n"
            "| s=0.015 | " + f"{op0015_auc:.4f}" + " | " + f"{bl_auc-op0015_auc:.4f}" + " | " + f"{bl_acc:.1f}%" + " |\n"
            "| s=0.02 | " + f"{op002_auc:.4f}" + " | " + f"{bl_auc-op002_auc:.4f}" + " | " + f"{bl_acc:.1f}%" + " |\n\n"
            "零效用损失,适合已部署系统的后期加固。\n\n---\n\n"
            "### 四、隐私-效用权衡\n\n"
            "| 策略 | AUC | 准确率 | 隐私 | 效用 |\n|---|---|---|---|---|\n"
            "| 基线 | " + f"{bl_auc:.4f}" + " | " + f"{bl_acc:.1f}%" + " | 风险最高 | 基准 |\n"
            "| LS(e=0.02) | " + f"{s002_auc:.4f}" + " | " + f"{s002_acc:.1f}%" + " | 风险降低 | 提升 |\n"
            "| LS(e=0.2) | " + f"{s02_auc:.4f}" + " | " + f"{s02_acc:.1f}%" + " | 显著降低 | 可接受 |\n"
            "| OP(s=0.02) | " + f"{op002_auc:.4f}" + " | " + f"{bl_acc:.1f}%" + " | 显著降低 | 不变 |\n\n"
            "两类策略机制互补:标签平滑从训练阶段降低记忆,输出扰动从推理阶段遮蔽信号。\n")

        gr.Markdown("<center style='color:#94a3b8;font-size:.85rem;margin-top:1em'>教育大模型成员推理攻击及其防御研究</center>")


demo.launch()