sree4411 committed on
Commit
1ca4934
·
verified ·
1 Parent(s): 4cd7e41

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -0
app.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Streamlit educational app: reference card for common ML performance metrics.

Renders two tabs — classification and regression — each with expandable
sections showing the metric's formula (LaTeX), a one-line explanation, and
a usage tip. Pure UI; no model is trained or evaluated.
"""

import streamlit as st

# Must be the first Streamlit call in the script.
st.set_page_config(page_title="ML Performance Metrics", layout="wide")

# Title
st.title("📊 Machine Learning Performance Metrics")
st.markdown(
    "Understand key metrics used to evaluate ML models — for both "
    "**classification** and **regression** problems."
)

st.markdown("---")

# Tabs for Classification and Regression
tab1, tab2 = st.tabs(["🧠 Classification Metrics", "📈 Regression Metrics"])

# ======================== CLASSIFICATION ========================
with tab1:
    st.header("🧠 Classification Metrics")
    st.markdown("These metrics evaluate how well a model classifies input data into distinct categories.")

    with st.expander("🎯 Accuracy"):
        st.latex(r"Accuracy = \frac{TP + TN}{TP + TN + FP + FN}")
        st.markdown("Proportion of correct predictions out of all predictions.")
        st.info("✔️ Best used when classes are balanced.")

    with st.expander("🎯 Precision"):
        st.latex(r"Precision = \frac{TP}{TP + FP}")
        st.markdown("How many predicted positives are actually correct?")
        st.info("✔️ Important when False Positives are costly (e.g., spam detection).")

    with st.expander("🎯 Recall (Sensitivity)"):
        st.latex(r"Recall = \frac{TP}{TP + FN}")
        st.markdown("How many actual positives did we correctly predict?")
        st.info("✔️ Critical when False Negatives are costly (e.g., disease detection).")

    with st.expander("🎯 F1 Score"):
        st.latex(r"F1 = 2 \cdot \frac{Precision \cdot Recall}{Precision + Recall}")
        st.markdown("Harmonic mean of precision and recall.")
        st.info("✔️ Good balance when you need both precision and recall.")

    with st.expander("🎯 ROC-AUC Score"):
        st.markdown("Area under the ROC curve — plots **True Positive Rate** vs **False Positive Rate**.")
        # `use_column_width` is deprecated in Streamlit; `use_container_width`
        # is the supported replacement with the same visual effect.
        st.image(
            "https://upload.wikimedia.org/wikipedia/commons/6/6b/Roccurves.png",
            caption="ROC Curves Example",
            use_container_width=True,
        )
        st.info("✔️ Best when evaluating probabilistic classifiers.")

    st.markdown("---")
    st.success("📌 Tip: Always use multiple metrics for a complete performance picture.")

# ======================== REGRESSION ========================
with tab2:
    st.header("📈 Regression Metrics")
    st.markdown("These metrics evaluate how well a model predicts continuous numerical values.")

    with st.expander("🎯 Mean Absolute Error (MAE)"):
        st.latex(r"MAE = \frac{1}{n} \sum_{i=1}^{n} |y_i - \hat{y}_i|")
        st.markdown("Average of absolute differences between actual and predicted values.")
        st.info("✔️ Less sensitive to outliers.")

    with st.expander("🎯 Mean Squared Error (MSE)"):
        st.latex(r"MSE = \frac{1}{n} \sum_{i=1}^{n} (y_i - \hat{y}_i)^2")
        st.markdown("Average of squared differences. Penalizes larger errors more.")
        st.info("✔️ Good for highlighting large deviations.")

    with st.expander("🎯 Root Mean Squared Error (RMSE)"):
        st.latex(r"RMSE = \sqrt{MSE}")
        st.markdown("Square root of MSE. Same units as target variable.")
        st.info("✔️ Interpretable on the same scale as the original data.")

    with st.expander("🎯 R² Score (Coefficient of Determination)"):
        st.latex(r"R^2 = 1 - \frac{\sum (y_i - \hat{y}_i)^2}{\sum (y_i - \bar{y})^2}")
        st.markdown("Measures proportion of variance explained by the model.")
        # Fix: R² is NOT bounded by 0 — it can be arbitrarily negative when the
        # model fits worse than simply predicting the mean of y.
        st.info("✔️ Upper bound is 1 (perfect fit); 0 means no better than predicting the mean, and it can be negative for worse models.")

    st.markdown("---")
    st.success("📌 Tip: Use MAE when you care about average error, and RMSE when you want to penalize large errors.")