Wen1201 committed on
Commit
ffa2193
·
verified ·
1 Parent(s): 0e299c0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +349 -152
app.py CHANGED
@@ -17,6 +17,73 @@ st.set_page_config(
17
  initial_sidebar_state="expanded"
18
  )
19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  # 導入自定義模組
21
  from bn_core import BayesianNetworkAnalyzer
22
  from llm_assistant import LLMAssistant
@@ -107,8 +174,8 @@ with tab1:
107
  df = None
108
 
109
  if df is not None:
110
- # 特徵選擇
111
- st.subheader("🎯 Feature Selection")
112
 
113
  # 自動識別特徵類型
114
  numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
@@ -120,27 +187,43 @@ with tab1:
120
  col_feat1, col_feat2 = st.columns(2)
121
 
122
  with col_feat1:
123
- st.markdown("**Categorical Features**")
124
- cat_features = st.multiselect(
125
- "Select categorical features:",
126
- options=categorical_cols,
127
- default=categorical_cols[:5] if len(categorical_cols) > 0 else []
128
- )
129
 
130
  with col_feat2:
131
- st.markdown("**Continuous Features**")
132
- con_features = st.multiselect(
133
- "Select continuous features:",
134
- options=numeric_cols,
135
- default=numeric_cols[:3] if len(numeric_cols) > 0 else []
 
 
 
 
 
 
 
 
 
 
 
 
 
136
  )
137
 
138
- # 目標變數
139
- target_variable = st.selectbox(
140
- "🎯 Target Variable (Y):",
141
- options=binary_cols,
142
- help="Must be a binary classification variable"
143
- )
 
 
 
144
 
145
  # 驗證選擇
146
  selected_features = cat_features + con_features
@@ -150,40 +233,57 @@ with tab1:
150
 
151
  st.markdown("---")
152
 
153
- # 模型參數
154
- st.subheader("⚙️ Model Parameters")
155
 
156
- col_param1, col_param2, col_param3 = st.columns(3)
157
 
158
  with col_param1:
159
- test_fraction = st.slider(
160
- "Test Dataset Proportion:",
161
- min_value=0.1,
162
- max_value=0.5,
163
- value=0.25,
164
- step=0.05
165
- )
166
-
167
- algorithm = st.selectbox(
168
  "Network Structure:",
169
  options=['NB', 'TAN', 'CL', 'HC', 'PC'],
170
  format_func=lambda x: {
171
- 'NB': 'Naive Bayes',
172
- 'TAN': 'Tree-Augmented Naive Bayes',
173
  'CL': 'Chow-Liu',
174
  'HC': 'Hill Climbing',
175
- 'PC': 'PC Algorithm'
176
- }[x]
 
177
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
178
 
179
  with col_param2:
180
- estimator = st.selectbox(
181
  "Parameter Estimator:",
182
  options=['ml', 'bn'],
183
  format_func=lambda x: {
184
- 'ml': 'Maximum Likelihood',
185
- 'bn': 'Bayesian Estimator'
186
- }[x]
 
187
  )
188
 
189
  if estimator == 'bn':
@@ -191,44 +291,60 @@ with tab1:
191
  "Equivalent Sample Size:",
192
  min_value=1,
193
  value=3,
194
- step=1
 
195
  )
196
  else:
197
  equivalent_sample_size = 3
198
 
199
- # 條件性參數
200
- if algorithm == 'HC':
201
- score_method = st.selectbox(
202
- "Scoring Method:",
203
- options=['BIC', 'AIC', 'K2', 'BDeu', 'BDs']
 
 
204
  )
205
  else:
206
- score_method = 'BIC'
207
 
208
- with col_param3:
209
- if algorithm == 'PC':
210
- sig_level = st.number_input(
211
- "Significance Level:",
212
- min_value=0.01,
213
- max_value=1.0,
214
- value=0.05,
215
- step=0.01
216
- )
217
- else:
218
- sig_level = 0.05
219
 
220
- n_bins = st.number_input(
221
- "Number of Bins (for continuous):",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
222
  min_value=3,
223
  max_value=20,
224
  value=10,
225
- step=1
 
226
  )
227
 
228
  # 執行分析按鈕
229
  st.markdown("---")
230
 
231
- col_btn1, col_btn2, col_btn3 = st.columns([2, 1, 1])
232
 
233
  with col_btn1:
234
  run_button = st.button("🚀 Run Analysis", type="primary", use_container_width=True)
@@ -240,18 +356,18 @@ with tab1:
240
  st.session_state.chat_history = []
241
  st.rerun()
242
 
243
- with col_btn3:
244
- with st.popover("ℹ️ Info"):
245
- st.markdown("""
246
- **Analysis Steps:**
247
- 1. Split data (train/test)
248
- 2. Learn network structure
249
- 3. Process features (bins from train)
250
- 4. Estimate parameters
251
- 5. Evaluate performance
252
-
253
- **Note:** Test set bins are derived from training set to prevent data leakage.
254
- """)
255
 
256
  if run_button:
257
  # 驗證
@@ -338,90 +454,171 @@ with tab1:
338
 
339
  results = st.session_state.analysis_results
340
 
341
- # 網路
342
- st.subheader("🕸️ Bayesian Network Structure")
343
- network_fig = generate_network_graph(results['model'])
344
- st.plotly_chart(network_fig, use_container_width=True)
345
-
346
- # 效能指標
347
- st.subheader("📈 Performance Metrics")
348
 
349
- col_m1, col_m2 = st.columns(2)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
350
 
351
- with col_m1:
352
- st.markdown("**Training Set**")
353
- train_metrics = results['train_metrics']
354
-
355
- metric_cols = st.columns(4)
356
- metric_cols[0].metric("Accuracy", f"{train_metrics['accuracy']:.2f}%")
357
- metric_cols[1].metric("Precision", f"{train_metrics['precision']:.2f}%")
358
- metric_cols[2].metric("Recall", f"{train_metrics['recall']:.2f}%")
359
- metric_cols[3].metric("F1-Score", f"{train_metrics['f1']:.2f}%")
360
-
361
- # 混淆矩陣
362
- conf_fig_train = plot_confusion_matrix(
363
- train_metrics['confusion_matrix'],
364
- title="Training Set Confusion Matrix"
365
- )
366
- st.plotly_chart(conf_fig_train, use_container_width=True)
367
-
368
- # ROC Curve
369
- roc_fig_train = plot_roc_curve(
370
- train_metrics['fpr'],
371
- train_metrics['tpr'],
372
- train_metrics['auc'],
373
- title="Training Set ROC Curve"
374
- )
375
- st.plotly_chart(roc_fig_train, use_container_width=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
376
 
377
- with col_m2:
378
- st.markdown("**Test Set**")
379
- test_metrics = results['test_metrics']
380
-
381
- metric_cols = st.columns(4)
382
- metric_cols[0].metric("Accuracy", f"{test_metrics['accuracy']:.2f}%")
383
- metric_cols[1].metric("Precision", f"{test_metrics['precision']:.2f}%")
384
- metric_cols[2].metric("Recall", f"{test_metrics['recall']:.2f}%")
385
- metric_cols[3].metric("F1-Score", f"{test_metrics['f1']:.2f}%")
386
-
387
- # 混淆矩陣
388
- conf_fig_test = plot_confusion_matrix(
389
- test_metrics['confusion_matrix'],
390
- title="Test Set Confusion Matrix"
391
  )
392
- st.plotly_chart(conf_fig_test, use_container_width=True)
393
-
394
- # ROC Curve
395
- roc_fig_test = plot_roc_curve(
396
- test_metrics['fpr'],
397
- test_metrics['tpr'],
398
- test_metrics['auc'],
399
- title="Test Set ROC Curve"
400
- )
401
- st.plotly_chart(roc_fig_test, use_container_width=True)
402
-
403
- # 條件機率表
404
- st.subheader("📋 Conditional Probability Tables")
405
-
406
- selected_node = st.selectbox(
407
- "Select a node to view its CPD:",
408
- options=list(results['cpds'].keys())
409
- )
410
-
411
- if selected_node:
412
- cpd_df = create_cpd_table(results['cpds'][selected_node])
413
- st.dataframe(cpd_df, use_container_width=True)
414
-
415
- # 評分指標
416
- st.subheader("📊 Model Scores")
417
 
418
- score_cols = st.columns(5)
419
- scores = results['scores']
420
- score_cols[0].metric("Log-Likelihood", f"{scores['log_likelihood']:.2f}")
421
- score_cols[1].metric("BIC Score", f"{scores['bic']:.2f}")
422
- score_cols[2].metric("K2 Score", f"{scores['k2']:.2f}")
423
- score_cols[3].metric("BDeu Score", f"{scores['bdeu']:.2f}")
424
- score_cols[4].metric("BDs Score", f"{scores['bds']:.2f}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
425
 
426
  # Tab 2: AI 助手
427
  with tab2:
 
17
  initial_sidebar_state="expanded"
18
  )
19
 
20
+ # 自定義 CSS - 讓介面更像 Django
21
+ st.markdown("""
22
+ <style>
23
+ /* Expander 樣式 - 類似 Django 的摺疊區域 */
24
+ .streamlit-expanderHeader {
25
+ background-color: #e8f1f8;
26
+ border: 1px solid #b0cfe8;
27
+ border-radius: 5px;
28
+ font-weight: 600;
29
+ color: #1b4f72;
30
+ }
31
+
32
+ .streamlit-expanderHeader:hover {
33
+ background-color: #d0e7f8;
34
+ }
35
+
36
+ /* Checkbox 樣式 */
37
+ .stCheckbox {
38
+ padding: 2px 0;
39
+ }
40
+
41
+ /* Radio button 樣式 */
42
+ .stRadio > label {
43
+ font-weight: 600;
44
+ color: #1b4f72;
45
+ }
46
+
47
+ /* 選擇框樣式 */
48
+ .stSelectbox > label, .stNumberInput > label {
49
+ font-weight: 600;
50
+ color: #1b4f72;
51
+ }
52
+
53
+ /* 分隔線 */
54
+ hr {
55
+ margin: 1rem 0;
56
+ border-top: 2px solid #b0cfe8;
57
+ }
58
+
59
+ /* 表單容器 */
60
+ .element-container {
61
+ margin-bottom: 0.5rem;
62
+ }
63
+
64
+ /* 摺疊內容區域 */
65
+ .streamlit-expanderContent {
66
+ background-color: #f8fbff;
67
+ border: 1px solid #d0e4f5;
68
+ border-top: none;
69
+ padding: 1rem;
70
+ }
71
+
72
+ /* 按鈕樣式 */
73
+ .stButton > button {
74
+ width: 100%;
75
+ border-radius: 20px;
76
+ font-weight: 600;
77
+ transition: all 0.3s ease;
78
+ }
79
+
80
+ .stButton > button:hover {
81
+ transform: translateY(-2px);
82
+ box-shadow: 0 4px 8px rgba(0,0,0,0.2);
83
+ }
84
+ </style>
85
+ """, unsafe_allow_html=True)
86
+
87
  # 導入自定義模組
88
  from bn_core import BayesianNetworkAnalyzer
89
  from llm_assistant import LLMAssistant
 
174
  df = None
175
 
176
  if df is not None:
177
+ # 特徵選擇 - 使用 expander (可摺疊)
178
+ st.subheader("🎯 Input Features")
179
 
180
  # 自動識別特徵類型
181
  numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
 
187
  col_feat1, col_feat2 = st.columns(2)
188
 
189
  with col_feat1:
190
+ with st.expander("**Continuous**", expanded=False):
191
+ st.caption("Select continuous features:")
192
+ con_features = []
193
+ for col in numeric_cols:
194
+ if st.checkbox(col, value=False, key=f"con_{col}"):
195
+ con_features.append(col)
196
 
197
  with col_feat2:
198
+ with st.expander("**Categorical**", expanded=True):
199
+ st.caption("Select categorical features:")
200
+ cat_features = []
201
+ for col in categorical_cols:
202
+ # 預設勾選前幾個
203
+ default_checked = categorical_cols.index(col) < 5 if len(categorical_cols) > 5 else True
204
+ if st.checkbox(col, value=default_checked, key=f"cat_{col}"):
205
+ cat_features.append(col)
206
+
207
+ # 目標變數 - 放在特徵選擇下方
208
+ st.markdown("---")
209
+
210
+ col_target1, col_target2 = st.columns([1, 2])
211
+ with col_target1:
212
+ target_variable = st.selectbox(
213
+ "Target Variable (Y):",
214
+ options=binary_cols,
215
+ help="Must be a binary classification variable"
216
  )
217
 
218
+ with col_target2:
219
+ test_fraction = st.number_input(
220
+ "Test Dataset Proportion:",
221
+ min_value=0.10,
222
+ max_value=0.50,
223
+ value=0.25,
224
+ step=0.05,
225
+ format="%.2f"
226
+ )
227
 
228
  # 驗證選擇
229
  selected_features = cat_features + con_features
 
233
 
234
  st.markdown("---")
235
 
236
+ # 模型參數 - 使用更緊湊的佈局
237
+ st.subheader("⚙️ Model Configuration")
238
 
239
+ col_param1, col_param2 = st.columns(2)
240
 
241
  with col_param1:
242
+ algorithm = st.radio(
 
 
 
 
 
 
 
 
243
  "Network Structure:",
244
  options=['NB', 'TAN', 'CL', 'HC', 'PC'],
245
  format_func=lambda x: {
246
+ 'NB': 'Naive Bayes (NB)',
247
+ 'TAN': 'Tree-Augmented Naive Bayes (TAN)',
248
  'CL': 'Chow-Liu',
249
  'HC': 'Hill Climbing',
250
+ 'PC': 'PC'
251
+ }[x],
252
+ help="Select structure learning algorithm"
253
  )
254
+
255
+ # 條件性參數 - HC
256
+ if algorithm == 'HC':
257
+ score_method = st.selectbox(
258
+ "Scoring Method:",
259
+ options=['BIC', 'AIC', 'K2', 'BDeu', 'BDs'],
260
+ help="Select scoring method for Hill Climbing"
261
+ )
262
+ else:
263
+ score_method = 'BIC'
264
+
265
+ # 條件性參數 - PC
266
+ if algorithm == 'PC':
267
+ sig_level = st.number_input(
268
+ "Significance Level:",
269
+ min_value=0.01,
270
+ max_value=1.0,
271
+ value=0.05,
272
+ step=0.01,
273
+ help="Significance level for PC algorithm"
274
+ )
275
+ else:
276
+ sig_level = 0.05
277
 
278
  with col_param2:
279
+ estimator = st.radio(
280
  "Parameter Estimator:",
281
  options=['ml', 'bn'],
282
  format_func=lambda x: {
283
+ 'ml': 'MaximumLikelihoodEstimator',
284
+ 'bn': 'BayesianEstimator'
285
+ }[x],
286
+ help="Select parameter estimation method"
287
  )
288
 
289
  if estimator == 'bn':
 
291
  "Equivalent Sample Size:",
292
  min_value=1,
293
  value=3,
294
+ step=1,
295
+ help="Prior strength for Bayesian estimation"
296
  )
297
  else:
298
  equivalent_sample_size = 3
299
 
300
+ # Decision (如果是預設資料集才顯示)
301
+ if data_source == "Use Default Dataset":
302
+ decision = st.selectbox(
303
+ "Decision:",
304
+ options=['OverAll', 'Exposed', 'Unexposed'],
305
+ index=0,
306
+ help="Analysis subset selection"
307
  )
308
  else:
309
+ decision = 'OverAll'
310
 
311
+ # Provide Evidence - 可摺疊區域
312
+ st.markdown("---")
313
+ with st.expander("**Provide Evidence**", expanded=False):
314
+ st.caption("Enter evidence values for inference (optional):")
315
+
316
+ evidence_cols = st.columns(2)
317
+ evidence_dict = {}
 
 
 
 
318
 
319
+ # 為每個非目標變數創建輸入框
320
+ all_vars = [v for v in selected_features if v != target_variable]
321
+
322
+ for idx, var in enumerate(all_vars):
323
+ with evidence_cols[idx % 2]:
324
+ val = st.text_input(
325
+ f"{var}:",
326
+ value="",
327
+ key=f"evidence_{var}",
328
+ help=f"Enter value for {var} (leave empty to ignore)"
329
+ )
330
+ if val.strip():
331
+ evidence_dict[var] = val.strip()
332
+
333
+ # 進階參數 - 摺疊區域
334
+ with st.expander("**Advanced Parameters**", expanded=False):
335
+ n_bins = st.slider(
336
+ "Number of Bins (for continuous variables):",
337
  min_value=3,
338
  max_value=20,
339
  value=10,
340
+ step=1,
341
+ help="Number of bins for discretizing continuous features"
342
  )
343
 
344
  # 執行分析按鈕
345
  st.markdown("---")
346
 
347
+ col_btn1, col_btn2 = st.columns([3, 1])
348
 
349
  with col_btn1:
350
  run_button = st.button("🚀 Run Analysis", type="primary", use_container_width=True)
 
356
  st.session_state.chat_history = []
357
  st.rerun()
358
 
359
+ # 說明資訊
360
+ with st.expander("ℹ️ Analysis Information", expanded=False):
361
+ st.markdown("""
362
+ **Analysis Steps:**
363
+ 1. Split data (train/test)
364
+ 2. Learn network structure
365
+ 3. Process features (bins from train)
366
+ 4. Estimate parameters
367
+ 5. Evaluate performance
368
+
369
+ **Note:** Test set bins are derived from training set to prevent data leakage.
370
+ """)
371
 
372
  if run_button:
373
  # 驗證
 
454
 
455
  results = st.session_state.analysis_results
456
 
457
+ # 使用 tabs 來組織
458
+ result_tabs = st.tabs([
459
+ "🕸️ Network Structure",
460
+ "📈 Performance Metrics",
461
+ "📋 CPD Tables",
462
+ "📊 Model Scores"
463
+ ])
464
 
465
+ # Tab 1: 網路結構
466
+ with result_tabs[0]:
467
+ network_fig = generate_network_graph(results['model'])
468
+ st.plotly_chart(network_fig, use_container_width=True)
469
+
470
+ # 顯示邊的列表
471
+ with st.expander("View Network Edges", expanded=False):
472
+ edges = list(results['model'].edges())
473
+ st.write(f"Total edges: {len(edges)}")
474
+
475
+ # 每行顯示 3 個邊
476
+ for i in range(0, len(edges), 3):
477
+ cols = st.columns(3)
478
+ for j, col in enumerate(cols):
479
+ if i + j < len(edges):
480
+ edge = edges[i + j]
481
+ col.markdown(f"**{edge[0]}** → {edge[1]}")
482
 
483
+ # Tab 2: 效能指標
484
+ with result_tabs[1]:
485
+ col_m1, col_m2 = st.columns(2)
486
+
487
+ with col_m1:
488
+ st.markdown("### Training Set")
489
+ train_metrics = results['train_metrics']
490
+
491
+ # 使用 metrics 卡片
492
+ metric_cols = st.columns(4)
493
+ metric_cols[0].metric("Accuracy", f"{train_metrics['accuracy']:.2f}%")
494
+ metric_cols[1].metric("Precision", f"{train_metrics['precision']:.2f}%")
495
+ metric_cols[2].metric("Recall", f"{train_metrics['recall']:.2f}%")
496
+ metric_cols[3].metric("F1-Score", f"{train_metrics['f1']:.2f}%")
497
+
498
+ metric_cols2 = st.columns(4)
499
+ metric_cols2[0].metric("AUC", f"{train_metrics['auc']:.4f}")
500
+ metric_cols2[1].metric("G-mean", f"{train_metrics['g_mean']:.2f}%")
501
+ metric_cols2[2].metric("P-mean", f"{train_metrics['p_mean']:.2f}%")
502
+ metric_cols2[3].metric("Specificity", f"{train_metrics['specificity']:.2f}%")
503
+
504
+ # 混淆矩陣
505
+ with st.expander("Confusion Matrix", expanded=True):
506
+ conf_fig_train = plot_confusion_matrix(
507
+ train_metrics['confusion_matrix'],
508
+ title="Training Set"
509
+ )
510
+ st.plotly_chart(conf_fig_train, use_container_width=True)
511
+
512
+ # ROC Curve
513
+ with st.expander("ROC Curve", expanded=False):
514
+ roc_fig_train = plot_roc_curve(
515
+ train_metrics['fpr'],
516
+ train_metrics['tpr'],
517
+ train_metrics['auc'],
518
+ title="Training Set"
519
+ )
520
+ st.plotly_chart(roc_fig_train, use_container_width=True)
521
+
522
+ with col_m2:
523
+ st.markdown("### Test Set")
524
+ test_metrics = results['test_metrics']
525
+
526
+ metric_cols = st.columns(4)
527
+ metric_cols[0].metric("Accuracy", f"{test_metrics['accuracy']:.2f}%")
528
+ metric_cols[1].metric("Precision", f"{test_metrics['precision']:.2f}%")
529
+ metric_cols[2].metric("Recall", f"{test_metrics['recall']:.2f}%")
530
+ metric_cols[3].metric("F1-Score", f"{test_metrics['f1']:.2f}%")
531
+
532
+ metric_cols2 = st.columns(4)
533
+ metric_cols2[0].metric("AUC", f"{test_metrics['auc']:.4f}")
534
+ metric_cols2[1].metric("G-mean", f"{test_metrics['g_mean']:.2f}%")
535
+ metric_cols2[2].metric("P-mean", f"{test_metrics['p_mean']:.2f}%")
536
+ metric_cols2[3].metric("Specificity", f"{test_metrics['specificity']:.2f}%")
537
+
538
+ # 混淆矩陣
539
+ with st.expander("Confusion Matrix", expanded=True):
540
+ conf_fig_test = plot_confusion_matrix(
541
+ test_metrics['confusion_matrix'],
542
+ title="Test Set"
543
+ )
544
+ st.plotly_chart(conf_fig_test, use_container_width=True)
545
+
546
+ # ROC Curve
547
+ with st.expander("ROC Curve", expanded=False):
548
+ roc_fig_test = plot_roc_curve(
549
+ test_metrics['fpr'],
550
+ test_metrics['tpr'],
551
+ test_metrics['auc'],
552
+ title="Test Set"
553
+ )
554
+ st.plotly_chart(roc_fig_test, use_container_width=True)
555
 
556
+ # Tab 3: 條件機率表
557
+ with result_tabs[2]:
558
+ selected_node = st.selectbox(
559
+ "Select a node to view its CPD:",
560
+ options=list(results['cpds'].keys())
 
 
 
 
 
 
 
 
 
561
  )
562
+
563
+ if selected_node:
564
+ cpd_df = create_cpd_table(results['cpds'][selected_node])
565
+ st.dataframe(cpd_df, use_container_width=True)
566
+
567
+ # 下載按鈕
568
+ csv = cpd_df.to_csv()
569
+ st.download_button(
570
+ label="📥 Download CPD as CSV",
571
+ data=csv,
572
+ file_name=f"cpd_{selected_node}.csv",
573
+ mime="text/csv"
574
+ )
 
 
 
 
 
 
 
 
 
 
 
 
575
 
576
+ # Tab 4: 模型評分
577
+ with result_tabs[3]:
578
+ scores = results['scores']
579
+
580
+ score_cols = st.columns(5)
581
+ score_cols[0].metric("Log-Likelihood", f"{scores['log_likelihood']:.2f}")
582
+ score_cols[1].metric("BIC Score", f"{scores['bic']:.2f}")
583
+ score_cols[2].metric("K2 Score", f"{scores['k2']:.2f}")
584
+ score_cols[3].metric("BDeu Score", f"{scores['bdeu']:.2f}")
585
+ score_cols[4].metric("BDs Score", f"{scores['bds']:.2f}")
586
+
587
+ # 參數摘要
588
+ with st.expander("Analysis Parameters", expanded=True):
589
+ params = results['parameters']
590
+
591
+ col1, col2, col3 = st.columns(3)
592
+
593
+ with col1:
594
+ st.markdown("**Algorithm Settings**")
595
+ st.write(f"- Algorithm: {params['algorithm']}")
596
+ st.write(f"- Estimator: {params['estimator']}")
597
+ st.write(f"- Test Fraction: {params['test_fraction']:.2%}")
598
+
599
+ with col2:
600
+ st.markdown("**Feature Information**")
601
+ st.write(f"- Total Features: {params['n_features']}")
602
+ st.write(f"- Categorical: {len(params['cat_features'])}")
603
+ st.write(f"- Continuous: {len(params['con_features'])}")
604
+ st.write(f"- Target: {params['target_variable']}")
605
+
606
+ with col3:
607
+ st.markdown("**Other Parameters**")
608
+ st.write(f"- Bins: {params['n_bins']}")
609
+ st.write(f"- Score Method: {params['score_method']}")
610
+ st.write(f"- Significance Level: {params['sig_level']}")
611
+ st.write(f"- Equivalent Sample Size: {params['equivalent_sample_size']}")
612
+
613
+ # 匯出結果
614
+ with st.expander("Export Results", expanded=False):
615
+ result_json = export_results_to_json(results)
616
+ st.download_button(
617
+ label="📥 Download Full Results (JSON)",
618
+ data=result_json,
619
+ file_name=f"bn_analysis_{results['timestamp'][:10]}.json",
620
+ mime="application/json"
621
+ )
622
 
623
  # Tab 2: AI 助手
624
  with tab2: