Synav committed on
Commit
df64d6f
·
verified ·
1 Parent(s): 7e199ee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -9
app.py CHANGED
@@ -264,6 +264,11 @@ def train_and_save(df: pd.DataFrame, feature_cols, num_cols, cat_cols, n_bins: i
264
  "thresholds": [float(x) for x in roc_thresholds],
265
  },
266
  }
 
 
 
 
 
267
 
268
 
269
  joblib.dump(pipe, "model.joblib")
@@ -535,11 +540,7 @@ with tab_train:
535
  c8.metric("Test N", m["n_test"])
536
 
537
 
538
- metrics.update({
539
- "pr_curve": pr,
540
- "calibration": cal,
541
- "decision_curve": dca,
542
- })
543
 
544
 
545
  # Confusion matrix display
@@ -564,8 +565,11 @@ with tab_train:
564
 
565
  st.divider()
566
  st.subheader("Precision–Recall (PR) Curve")
567
-
568
- pr = meta["metrics"]["pr_curve"]
 
 
 
569
  c1, c2 = st.columns(2)
570
  c1.metric("Average Precision (AP)", f"{pr['average_precision']:.3f}")
571
 
@@ -579,7 +583,10 @@ with tab_train:
579
  st.divider()
580
  st.subheader("Calibration (Reliability Plot)")
581
 
582
- cal = meta["metrics"]["calibration"]
 
 
 
583
  c1, c2 = st.columns(2)
584
  c1.metric("Brier score", f"{cal['brier']:.4f}")
585
  c2.write(f"Bins: {cal['n_bins']} | Strategy: {cal['strategy']}")
@@ -595,8 +602,12 @@ with tab_train:
595
 
596
  st.divider()
597
  st.subheader("Decision Curve Analysis (Clinical Usefulness)")
 
 
 
 
598
 
599
- dca = meta["metrics"]["decision_curve"]
600
 
601
  fig_dca = plt.figure()
602
  plt.plot(dca["thresholds"], dca["net_benefit_model"])
 
264
  "thresholds": [float(x) for x in roc_thresholds],
265
  },
266
  }
267
+ metrics.update({
268
+ "pr_curve": pr,
269
+ "calibration": cal,
270
+ "decision_curve": dca,
271
+ })
272
 
273
 
274
  joblib.dump(pipe, "model.joblib")
 
540
  c8.metric("Test N", m["n_test"])
541
 
542
 
543
+
 
 
 
 
544
 
545
 
546
  # Confusion matrix display
 
565
 
566
  st.divider()
567
  st.subheader("Precision–Recall (PR) Curve")
568
+ if "pr_curve" not in m:
569
+ st.warning("PR curve not available in this model metadata. Retrain the model to generate it.")
570
+ st.stop()
571
+
572
+ pr = m["pr_curve"]
573
  c1, c2 = st.columns(2)
574
  c1.metric("Average Precision (AP)", f"{pr['average_precision']:.3f}")
575
 
 
583
  st.divider()
584
  st.subheader("Calibration (Reliability Plot)")
585
 
586
+ if "calibration" not in m:
587
+ st.warning("calibration not available in this model metadata. Retrain the model to generate it.")
588
+ st.stop()
589
+ cal = m["calibration"]
590
  c1, c2 = st.columns(2)
591
  c1.metric("Brier score", f"{cal['brier']:.4f}")
592
  c2.write(f"Bins: {cal['n_bins']} | Strategy: {cal['strategy']}")
 
602
 
603
  st.divider()
604
  st.subheader("Decision Curve Analysis (Clinical Usefulness)")
605
+
606
+ if "decision_curve" not in m:
607
+ st.warning("decision_curve not available in this model metadata. Retrain the model to generate it.")
608
+ st.stop()
609
 
610
+ dca = m["decision_curve"]
611
 
612
  fig_dca = plt.figure()
613
  plt.plot(dca["thresholds"], dca["net_benefit_model"])