DanielGallagherIRE committed on
Commit
6d26940
·
verified ·
1 Parent(s): 663966e

Upload metrics.py

Browse files
Files changed (1) hide show
  1. grewtse/evaluators/metrics.py +1 -23
grewtse/evaluators/metrics.py CHANGED
@@ -75,30 +75,8 @@ def calculate_accuracy(df: pd.DataFrame) -> float:
75
  return correct / total if total > 0 else 0.0
76
 
77
  def calculate_all_metrics(df: pd.DataFrame) -> dict:
78
- predictions = get_predictions(df)
79
- true_labels = np.ones(len(df), dtype=int)
80
-
81
- # Calculate confusion matrix components
82
- tp = np.sum((predictions == 1) & (true_labels == 1))
83
- fp = np.sum((predictions == 1) & (true_labels == 0))
84
- fn = np.sum((predictions == 0) & (true_labels == 1))
85
- tn = np.sum((predictions == 0) & (true_labels == 0))
86
-
87
- total = len(predictions)
88
-
89
- # Calculate metrics
90
- accuracy = (tp + tn) / total if total > 0 else 0.0
91
- precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
92
- recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
93
- f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0.0
94
 
95
  return {
96
  'accuracy': round(accuracy,2),
97
- 'precision': round(precision, 2),
98
- 'recall': round(recall, 2),
99
- 'f1': round(f1, 2),
100
- 'true_positives': int(tp),
101
- 'false_positives': int(fp),
102
- 'false_negatives': int(fn),
103
- 'true_negatives': int(tn)
104
  }
 
75
  return correct / total if total > 0 else 0.0
76
 
77
  def calculate_all_metrics(df: pd.DataFrame) -> dict:
78
+ accuracy = calculate_accuracy(df)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
 
80
  return {
81
  'accuracy': round(accuracy,2),
 
 
 
 
 
 
 
82
  }