Gourav18 committed on
Commit
40a5362
·
verified ·
1 Parent(s): d8e3ff1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -13
app.py CHANGED
@@ -231,23 +231,44 @@ def get_classification_report(y_true, y_pred):
231
  report_dict = classification_report(y_true, y_pred, output_dict=True)
232
  df = pd.DataFrame(report_dict).transpose()
233
  return df
 
 
 
 
 
234
  def evaluate_models(X_train, X_test, y_train, y_test):
235
- models =get_model_configs()
236
 
237
  results = {}
238
 
239
  plt.figure(figsize=(10, 6))
240
 
 
 
241
  for name, model in models.items():
242
  model.fit(X_train, y_train)
243
  y_pred = model.predict(X_test)
244
- y_prob = model.predict_proba(X_test)[:, 1] if hasattr(model, "predict_proba") else None
 
 
 
 
 
 
 
 
 
 
 
 
245
 
246
  accuracy = accuracy_score(y_test, y_pred)
247
- precision = precision_score(y_test, y_pred)
248
- recall = recall_score(y_test, y_pred, average='weighted')
249
- f1 = f1_score(y_test, y_pred, average='weighted')
250
- roc_auc = roc_auc_score(y_test, y_prob) if y_prob is not None else None
 
 
251
 
252
  results[name] = {
253
  "Accuracy": accuracy,
@@ -257,16 +278,17 @@ def evaluate_models(X_train, X_test, y_train, y_test):
257
  "ROC-AUC": roc_auc
258
  }
259
 
260
- if y_prob is not None:
261
  fpr, tpr, _ = roc_curve(y_test, y_prob)
262
  plt.plot(fpr, tpr, label=f"{name} (AUC = {roc_auc:.2f})")
263
 
264
- plt.plot([0, 1], [0, 1], linestyle="--", color="gray")
265
- plt.xlabel("False Positive Rate")
266
- plt.ylabel("True Positive Rate")
267
- plt.title("ROC Curves")
268
- plt.legend()
269
- plt.show()
 
270
 
271
  fig, axes = plt.subplots(2, 2, figsize=(12, 10))
272
  for ax, (name, model) in zip(axes.ravel(), models.items()):
 
231
  report_dict = classification_report(y_true, y_pred, output_dict=True)
232
  df = pd.DataFrame(report_dict).transpose()
233
  return df
234
+ from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, roc_curve, confusion_matrix
235
+ import matplotlib.pyplot as plt
236
+ import seaborn as sns
237
+ import pandas as pd
238
+
239
  def evaluate_models(X_train, X_test, y_train, y_test):
240
+ models = get_model_configs()
241
 
242
  results = {}
243
 
244
  plt.figure(figsize=(10, 6))
245
 
246
+ num_classes = len(set(y_test)) # Determine if the target is binary or multiclass
247
+
248
  for name, model in models.items():
249
  model.fit(X_train, y_train)
250
  y_pred = model.predict(X_test)
251
+
252
+ # Handle predict_proba for multiclass classification
253
+ if hasattr(model, "predict_proba"):
254
+ y_prob = model.predict_proba(X_test)
255
+ if num_classes == 2:
256
+ y_prob = y_prob[:, 1] # Binary classification
257
+ else:
258
+ y_prob = None # Ignore for multiclass
259
+ else:
260
+ y_prob = None # If the model doesn't support predict_proba
261
+
262
+ # Adjust 'average' based on the number of classes
263
+ average_type = 'binary' if num_classes == 2 else 'weighted'
264
 
265
  accuracy = accuracy_score(y_test, y_pred)
266
+ precision = precision_score(y_test, y_pred, average=average_type)
267
+ recall = recall_score(y_test, y_pred, average=average_type)
268
+ f1 = f1_score(y_test, y_pred, average=average_type)
269
+
270
+ # ROC-AUC only for binary classification
271
+ roc_auc = roc_auc_score(y_test, y_prob) if y_prob is not None and num_classes == 2 else None
272
 
273
  results[name] = {
274
  "Accuracy": accuracy,
 
278
  "ROC-AUC": roc_auc
279
  }
280
 
281
+ if y_prob is not None and num_classes == 2: # ROC curve only for binary
282
  fpr, tpr, _ = roc_curve(y_test, y_prob)
283
  plt.plot(fpr, tpr, label=f"{name} (AUC = {roc_auc:.2f})")
284
 
285
+ if num_classes == 2: # Plot ROC curve only for binary classification
286
+ plt.plot([0, 1], [0, 1], linestyle="--", color="gray")
287
+ plt.xlabel("False Positive Rate")
288
+ plt.ylabel("True Positive Rate")
289
+ plt.title("ROC Curves")
290
+ plt.legend()
291
+ plt.show()
292
 
293
  fig, axes = plt.subplots(2, 2, figsize=(12, 10))
294
  for ax, (name, model) in zip(axes.ravel(), models.items()):