"""Error analysis utilities for multi-label classification."""

import logging
from collections import defaultdict, Counter
from typing import Dict, List, Optional, Tuple

import pandas as pd
import torch

from evaluation.metrics import per_class_metrics

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class ErrorAnalyzer:
    """
    Analyze classification errors for multi-label classification.
    
    Identifies common misclassification patterns, false positives/negatives,
    and provides insights for model improvement.
    """
    
    def __init__(self):
        """Initialize error analyzer."""
    
    def analyze_false_positives(
        self,
        target: torch.Tensor,
        y_pred: torch.Tensor,
        class_names: Optional[List[str]] = None
    ) -> Dict[str, List[int]]:
        """
        Identify false positive predictions per class.
        
        Args:
            target: Ground truth binary matrix [batch_size, num_classes]
            y_pred: Predicted binary matrix [batch_size, num_classes]
            class_names: Optional list of class names
            
        Returns:
            Dictionary mapping class name to list of sample indices with false positives
            
        Example:
            >>> analyzer = ErrorAnalyzer()
            >>> target = torch.tensor([[0, 1], [1, 0]])
            >>> pred = torch.tensor([[1, 1], [1, 0]])
            >>> fps = analyzer.analyze_false_positives(target, pred)
            >>> fps["class_0"]
            [0]
        """
        num_classes = target.shape[1]
        if class_names is None:
            class_names = [f"class_{i}" for i in range(num_classes)]
        
        false_positives = {name: [] for name in class_names}
        
        for i in range(num_classes):
            class_target = target[:, i]
            class_pred = y_pred[:, i]
            
            # False positives: predicted but not in target
            fp_mask = (class_pred == 1) & (class_target == 0)
            fp_indices = torch.where(fp_mask)[0].tolist()
            
            false_positives[class_names[i]] = fp_indices
        
        return false_positives
    
    def analyze_false_negatives(
        self,
        target: torch.Tensor,
        y_pred: torch.Tensor,
        class_names: Optional[List[str]] = None
    ) -> Dict[str, List[int]]:
        """
        Identify false negative predictions per class.
        
        Args:
            target: Ground truth binary matrix [batch_size, num_classes]
            y_pred: Predicted binary matrix [batch_size, num_classes]
            class_names: Optional list of class names
            
        Returns:
            Dictionary mapping class name to list of sample indices with false negatives
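
        Example:
            >>> analyzer = ErrorAnalyzer()
            >>> target = torch.tensor([[0, 1], [1, 0]])
            >>> pred = torch.tensor([[0, 0], [1, 0]])
            >>> fns = analyzer.analyze_false_negatives(target, pred)
            >>> fns["class_1"]
            [0]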
        """
        num_classes = target.shape[1]
        if class_names is None:
            class_names = [f"class_{i}" for i in range(num_classes)]
        
        false_negatives = {name: [] for name in class_names}
        
        for i in range(num_classes):
            class_target = target[:, i]
            class_pred = y_pred[:, i]
            
            # False negatives: in target but not predicted
            fn_mask = (class_pred == 0) & (class_target == 1)
            fn_indices = torch.where(fn_mask)[0].tolist()
            
            false_negatives[class_names[i]] = fn_indices
        
        return false_negatives
    
    def find_common_misclassification_patterns(
        self,
        target: torch.Tensor,
        y_pred: torch.Tensor,
        class_names: Optional[List[str]] = None,
        top_k: int = 10
    ) -> List[Tuple[Tuple[Tuple[str, ...], Tuple[str, ...]], int]]:
        """
        Find common patterns of misclassification.
        
        Identifies frequently co-occurring classes that are misclassified together.
        
        Args:
            target: Ground truth binary matrix [batch_size, num_classes]
            y_pred: Predicted binary matrix [batch_size, num_classes]
            class_names: Optional list of class names
            top_k: Number of top patterns to return
            
        Returns:
            List of ((predicted_classes, actual_classes), count) tuples,
            sorted by frequency (descending)
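
        Example:
            >>> analyzer = ErrorAnalyzer()
            >>> target = torch.tensor([[1, 0], [1, 0]])
            >>> pred = torch.tensor([[0, 1], [0, 1]])
            >>> analyzer.find_common_misclassification_patterns(target, pred)
            [((('class_1',), ('class_0',)), 2)]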
        """
        num_classes = target.shape[1]
        if class_names is None:
            class_names = [f"class_{i}" for i in range(num_classes)]
        
        patterns = Counter()
        
        for sample_idx in range(target.shape[0]):
            # Get predicted and actual classes
            pred_classes = tuple(sorted([
                class_names[i] for i in range(num_classes) if y_pred[sample_idx, i] == 1
            ]))
            actual_classes = tuple(sorted([
                class_names[i] for i in range(num_classes) if target[sample_idx, i] == 1
            ]))
            
            # Only count if there's a mismatch
            if pred_classes != actual_classes:
                patterns[(pred_classes, actual_classes)] += 1
        
        # Return top K patterns
        return patterns.most_common(top_k)
    
    def analyze_class_confusion(
        self,
        target: torch.Tensor,
        y_pred: torch.Tensor,
        class_names: Optional[List[str]] = None
    ) -> pd.DataFrame:
        """
        Analyze confusion between classes.
        
        Creates a confusion matrix showing which classes are frequently
        confused with each other.
        
        Args:
            target: Ground truth binary matrix [batch_size, num_classes]
            y_pred: Predicted binary matrix [batch_size, num_classes]
            class_names: Optional list of class names
            
        Returns:
            DataFrame with confusion analysis
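
        Example:
            >>> analyzer = ErrorAnalyzer()
            >>> target = torch.tensor([[0, 1]])
            >>> pred = torch.tensor([[1, 1]])
            >>> df = analyzer.analyze_class_confusion(target, pred)
            >>> df["predicted"].iloc[0], df["actual"].iloc[0]
            ('class_0', 'class_1')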
        """
        num_classes = target.shape[1]
        if class_names is None:
            class_names = [f"class_{i}" for i in range(num_classes)]
        
        # Count confusions: class A falsely predicted while class B is among the actual labels
        confusion_counts = defaultdict(int)
        
        for sample_idx in range(target.shape[0]):
            pred_indices = set(i for i in range(num_classes) if y_pred[sample_idx, i] == 1)
            actual_indices = set(i for i in range(num_classes) if target[sample_idx, i] == 1)
            
            # False positives: predicted but not actual
            for pred_idx in pred_indices - actual_indices:
                for actual_idx in actual_indices:
                    confusion_counts[(class_names[pred_idx], class_names[actual_idx])] += 1
        
        # Create DataFrame
        if confusion_counts:
            data = [
                {"predicted": pred, "actual": actual, "count": count}
                for (pred, actual), count in confusion_counts.items()
            ]
            df = pd.DataFrame(data)
            df = df.sort_values("count", ascending=False)
        else:
            df = pd.DataFrame(columns=["predicted", "actual", "count"])
        
        return df
    
    def get_error_summary(
        self,
        target: torch.Tensor,
        y_pred: torch.Tensor,
        class_names: Optional[List[str]] = None
    ) -> Dict:
        """
        Get comprehensive error summary.
        
        Args:
            target: Ground truth binary matrix [batch_size, num_classes]
            y_pred: Predicted binary matrix [batch_size, num_classes]
            class_names: Optional list of class names
            
        Returns:
            Dictionary with error statistics
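
        Example:
            Output assumes ``per_class_metrics`` reports integer
            tp/fp/fn/tn counts per class:

            >>> analyzer = ErrorAnalyzer()
            >>> target = torch.tensor([[0, 1], [1, 0]])
            >>> pred = torch.tensor([[1, 1], [1, 0]])
            >>> summary = analyzer.get_error_summary(target, pred)
            >>> int(summary["total_false_positives"])
            1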
        """
        num_classes = target.shape[1]
        if class_names is None:
            class_names = [f"class_{i}" for i in range(num_classes)]
        
        # Get per-class metrics
        per_class = per_class_metrics(target, y_pred, class_names)
        
        # Calculate totals
        total_fp = sum(metrics["fp"] for metrics in per_class.values())
        total_fn = sum(metrics["fn"] for metrics in per_class.values())
        total_tp = sum(metrics["tp"] for metrics in per_class.values())
        total_tn = sum(metrics["tn"] for metrics in per_class.values())
        
        # Find classes with most errors
        classes_by_fp = sorted(
            per_class.items(),
            key=lambda x: x[1]["fp"],
            reverse=True
        )[:10]
        
        classes_by_fn = sorted(
            per_class.items(),
            key=lambda x: x[1]["fn"],
            reverse=True
        )[:10]
        
        return {
            "total_samples": target.shape[0],
            "total_classes": num_classes,
            "total_false_positives": total_fp,
            "total_false_negatives": total_fn,
            "total_true_positives": total_tp,
            "total_true_negatives": total_tn,
            "fp_rate": total_fp / (total_fp + total_tn + 1e-5),
            "fn_rate": total_fn / (total_fn + total_tp + 1e-5),
            "top_fp_classes": [
                {"class": name, "count": metrics["fp"]}
                for name, metrics in classes_by_fp
            ],
            "top_fn_classes": [
                {"class": name, "count": metrics["fn"]}
                for name, metrics in classes_by_fn
            ],
            "per_class_metrics": per_class
        }
    
    def visualize_errors(
        self,
        target: torch.Tensor,
        y_pred: torch.Tensor,
        class_names: Optional[List[str]] = None
    ) -> Dict[str, pd.DataFrame]:
        """
        Create visualization-ready DataFrames for error analysis.
        
        Args:
            target: Ground truth binary matrix [batch_size, num_classes]
            y_pred: Predicted binary matrix [batch_size, num_classes]
            class_names: Optional list of class names
            
        Returns:
            Dictionary with DataFrames for visualization
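
        Example:
            >>> analyzer = ErrorAnalyzer()
            >>> target = torch.tensor([[0, 1], [1, 0]])
            >>> pred = torch.tensor([[1, 1], [1, 0]])
            >>> dfs = analyzer.visualize_errors(target, pred)
            >>> sorted(dfs.keys())
            ['confusion_analysis', 'error_counts', 'per_class_metrics']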
        """
        num_classes = target.shape[1]
        if class_names is None:
            class_names = [f"class_{i}" for i in range(num_classes)]
        
        # Per-class metrics DataFrame
        per_class = per_class_metrics(target, y_pred, class_names)
        metrics_df = pd.DataFrame(per_class).T
        
        # Confusion analysis DataFrame
        confusion_df = self.analyze_class_confusion(target, y_pred, class_names)
        
        # Error counts per class
        error_counts = []
        for name, metrics in per_class.items():
            error_counts.append({
                "class": name,
                "false_positives": metrics["fp"],
                "false_negatives": metrics["fn"],
                "true_positives": metrics["tp"],
                "true_negatives": metrics["tn"]
            })
        error_df = pd.DataFrame(error_counts)
        
        return {
            "per_class_metrics": metrics_df,
            "confusion_analysis": confusion_df,
            "error_counts": error_df
        }
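

if __name__ == "__main__":
    # Minimal usage sketch on synthetic data; the tensors below are
    # illustrative stand-ins for real model targets and predictions.
    analyzer = ErrorAnalyzer()
    target = torch.tensor([[1, 0, 1], [0, 1, 0], [1, 1, 0]])
    pred = torch.tensor([[1, 1, 0], [0, 1, 0], [1, 0, 0]])

    logger.info("False positives: %s", analyzer.analyze_false_positives(target, pred))
    logger.info("False negatives: %s", analyzer.analyze_false_negatives(target, pred))
    logger.info(
        "Top misclassification patterns: %s",
        analyzer.find_common_misclassification_patterns(target, pred, top_k=3),
    )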