Yatheshr committed on
Commit
18cac6a
·
verified ·
1 Parent(s): abb88cb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -15
app.py CHANGED
@@ -4,7 +4,10 @@ import numpy as np
4
  from sklearn.model_selection import train_test_split
5
  from sklearn.preprocessing import StandardScaler, LabelEncoder
6
  from sklearn.ensemble import RandomForestClassifier
7
- from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, classification_report
 
 
 
8
  import seaborn as sns
9
  import matplotlib.pyplot as plt
10
 
@@ -58,9 +61,31 @@ def train_and_evaluate_model():
58
  prec = precision_score(y_test_labels, y_pred_labels, average='weighted')
59
  rec = recall_score(y_test_labels, y_pred_labels, average='weighted')
60
  f1 = f1_score(y_test_labels, y_pred_labels, average='weighted')
61
- clf_report = classification_report(y_test_labels, y_pred_labels)
62
 
63
- # Step 10: Confusion matrix
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  cm = confusion_matrix(y_test_labels, y_pred_labels, labels=le.classes_)
65
  plt.figure(figsize=(6, 5))
66
  sns.heatmap(cm, annot=True, fmt="d", cmap="Blues",
@@ -68,25 +93,19 @@ def train_and_evaluate_model():
68
  plt.xlabel("Predicted")
69
  plt.ylabel("Actual")
70
  plt.title("Confusion Matrix")
71
-
72
- # Step 11: Save the confusion matrix plot
73
  cm_path = "confusion_matrix.png"
74
- plt.savefig(cm_path)
75
  plt.close()
76
 
77
- # Step 12: Combine output (correct Markdown formatting)
78
  output = f"""
79
  ### ✅ Evaluation Metrics:
80
  - **Accuracy:** {acc:.2f}
81
  - **Precision:** {prec:.2f}
82
  - **Recall:** {rec:.2f}
83
  - **F1 Score:** {f1:.2f}
84
-
85
- ---
86
-
87
- ### 📊 Classification Report:
88
  """
89
- return output, cm_path
90
 
91
 
92
  # Gradio Interface
@@ -96,9 +115,10 @@ with gr.Blocks() as demo:
96
 
97
  eval_btn = gr.Button("Click here... To Run Model Evaluation")
98
  output_md = gr.Markdown()
99
- output_img = gr.Image(type="filepath")
 
100
 
101
- eval_btn.click(fn=train_and_evaluate_model, outputs=[output_md, output_img])
 
102
 
103
- # Launch app
104
  demo.launch()
 
4
  from sklearn.model_selection import train_test_split
5
  from sklearn.preprocessing import StandardScaler, LabelEncoder
6
  from sklearn.ensemble import RandomForestClassifier
7
+ from sklearn.metrics import (
8
+ accuracy_score, precision_score, recall_score, f1_score,
9
+ confusion_matrix, classification_report
10
+ )
11
  import seaborn as sns
12
  import matplotlib.pyplot as plt
13
 
 
61
  prec = precision_score(y_test_labels, y_pred_labels, average='weighted')
62
  rec = recall_score(y_test_labels, y_pred_labels, average='weighted')
63
  f1 = f1_score(y_test_labels, y_pred_labels, average='weighted')
 
64
 
65
+ # Step 10: Create Classification Report as DataFrame
66
+ report_dict = classification_report(y_test_labels, y_pred_labels, output_dict=True)
67
+ report_df = pd.DataFrame(report_dict).transpose().round(2)
68
+
69
+ # Step 11: Plot classification report as table with grid
70
+ fig, ax = plt.subplots(figsize=(8, 4))
71
+ ax.axis('off')
72
+ tbl = ax.table(
73
+ cellText=report_df.values,
74
+ colLabels=report_df.columns,
75
+ rowLabels=report_df.index,
76
+ cellLoc='center',
77
+ loc='center'
78
+ )
79
+ tbl.auto_set_font_size(False)
80
+ tbl.set_fontsize(10)
81
+ tbl.scale(1.2, 1.2)
82
+ for key, cell in tbl.get_celld().items():
83
+ cell.set_linewidth(0.8)
84
+ cr_path = "classification_report.png"
85
+ plt.savefig(cr_path, bbox_inches='tight')
86
+ plt.close()
87
+
88
+ # Step 12: Confusion matrix
89
  cm = confusion_matrix(y_test_labels, y_pred_labels, labels=le.classes_)
90
  plt.figure(figsize=(6, 5))
91
  sns.heatmap(cm, annot=True, fmt="d", cmap="Blues",
 
93
  plt.xlabel("Predicted")
94
  plt.ylabel("Actual")
95
  plt.title("Confusion Matrix")
 
 
96
  cm_path = "confusion_matrix.png"
97
+ plt.savefig(cm_path, bbox_inches='tight')
98
  plt.close()
99
 
100
+ # Step 13: Return outputs
101
  output = f"""
102
  ### ✅ Evaluation Metrics:
103
  - **Accuracy:** {acc:.2f}
104
  - **Precision:** {prec:.2f}
105
  - **Recall:** {rec:.2f}
106
  - **F1 Score:** {f1:.2f}
 
 
 
 
107
  """
108
+ return output, cr_path, cm_path
109
 
110
 
111
  # Gradio Interface
 
115
 
116
  eval_btn = gr.Button("Click here... To Run Model Evaluation")
117
  output_md = gr.Markdown()
118
+ report_img = gr.Image(type="filepath", label="📊 Classification Report")
119
+ cm_img = gr.Image(type="filepath", label="📉 Confusion Matrix")
120
 
121
+ eval_btn.click(fn=train_and_evaluate_model,
122
+ outputs=[output_md, report_img, cm_img])
123
 
 
124
  demo.launch()