"""
Explainability Module - InsightGenAI
====================================
SHAP-based model explainability with feature importance plots,
summary plots, and individual prediction explanations.

Author: InsightGenAI Team
Version: 1.0.0
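
Example (minimal sketch; assumes a fitted scikit-learn estimator `model`
and a feature DataFrame `X`):

    explainer = create_explainer(model, X)
    print(explainer.get_feature_importance().head())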
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from typing import Dict, List, Tuple, Optional, Any, Union
import streamlit as st
import warnings
warnings.filterwarnings('ignore')

# Try to import shap, handle if not available
try:
    import shap
    SHAP_AVAILABLE = True
except ImportError:
    SHAP_AVAILABLE = False


class ExplainabilityEngine:
    """
    Model explainability engine using SHAP values.
    
    Attributes:
        model: Trained model to explain
        X: Feature matrix
        explainer: SHAP explainer object
        shap_values: Calculated SHAP values
    """
    
    def __init__(self, model, X: pd.DataFrame, feature_names: Optional[List[str]] = None):
        """
        Initialize the Explainability Engine.
        
        Args:
            model: Trained model
            X: Feature data (sample for background)
            feature_names: List of feature names
        """
        if not SHAP_AVAILABLE:
            raise ImportError("SHAP is not installed. Please install with: pip install shap")
        
        self.model = model
        self.X = X.copy() if isinstance(X, pd.DataFrame) else pd.DataFrame(X)
        self.feature_names = feature_names or self.X.columns.tolist()
        self.X.columns = self.feature_names
        
        self.explainer = None
        self.shap_values = None
        self.expected_value = None
        
        # Initialize SHAP explainer
        self._init_explainer()
    
    def _init_explainer(self) -> None:
        """Initialize the appropriate SHAP explainer for the model."""
        try:
            # Try TreeExplainer first (for tree-based models)
            self.explainer = shap.TreeExplainer(self.model)
            self.shap_values = self.explainer.shap_values(self.X)
            self.expected_value = self.explainer.expected_value
        except Exception:
            try:
                # Fall back to the model-agnostic KernelExplainer (much slower);
                # use a 100-row background sample to keep it tractable
                self.explainer = shap.KernelExplainer(self.model.predict, shap.sample(self.X, 100))
                self.shap_values = self.explainer.shap_values(self.X)
                self.expected_value = self.explainer.expected_value
            except Exception as e:
                raise RuntimeError(f"Could not initialize SHAP explainer: {str(e)}")
    
    def get_feature_importance(self) -> pd.DataFrame:
        """
        Get global feature importance based on mean absolute SHAP values.
        
        Returns:
            pd.DataFrame with feature importance
        """
        if self.shap_values is None:
            raise ValueError("SHAP values not calculated. Please initialize explainer first.")
        
        # Handle different shap_values formats
        if isinstance(self.shap_values, list):
            # For multi-class, use the mean across all classes
            shap_array = np.abs(np.array(self.shap_values)).mean(axis=0).mean(axis=0)
        else:
            shap_array = np.abs(self.shap_values).mean(axis=0)
        
        importance_df = pd.DataFrame({
            'feature': self.feature_names,
            'importance': shap_array
        }).sort_values('importance', ascending=False)
        
        return importance_df
    
    def plot_summary(self, max_display: int = 15, figsize: Tuple[int, int] = (10, 8)) -> plt.Figure:
        """
        Create SHAP summary plot (beeswarm plot).
        
        Args:
            max_display: Maximum number of features to display
            figsize: Figure size tuple
            
        Returns:
            matplotlib Figure object
        """
        fig, ax = plt.subplots(figsize=figsize)
        
        # Handle different shap_values formats
        if isinstance(self.shap_values, list):
            # For multi-class classification, use the first class
            shap_values_plot = self.shap_values[0]
        else:
            shap_values_plot = self.shap_values
        
        shap.summary_plot(
            shap_values_plot, 
            self.X, 
            feature_names=self.feature_names,
            max_display=max_display,
            show=False
        )
        
        plt.title('SHAP Summary Plot', fontsize=14, fontweight='bold', pad=20)
        plt.tight_layout()
        return fig
    
    def plot_feature_importance(self, max_display: int = 15, 
                                figsize: Tuple[int, int] = (10, 8)) -> plt.Figure:
        """
        Create bar plot of feature importance.
        
        Args:
            max_display: Maximum number of features to display
            figsize: Figure size tuple
            
        Returns:
            matplotlib Figure object
        """
        fig, ax = plt.subplots(figsize=figsize)
        
        # Handle different shap_values formats
        if isinstance(self.shap_values, list):
            shap_values_plot = self.shap_values[0]
        else:
            shap_values_plot = self.shap_values
        
        shap.summary_plot(
            shap_values_plot, 
            self.X, 
            feature_names=self.feature_names,
            max_display=max_display,
            plot_type='bar',
            show=False
        )
        
        plt.title('SHAP Feature Importance', fontsize=14, fontweight='bold', pad=20)
        plt.tight_layout()
        return fig
    
    def plot_waterfall(self, instance_idx: int = 0, 
                       max_display: int = 10, 
                       figsize: Tuple[int, int] = (12, 6)) -> plt.Figure:
        """
        Create waterfall plot for a single prediction.
        
        Args:
            instance_idx: Index of the instance to explain
            max_display: Maximum number of features to display
            figsize: Figure size tuple
            
        Returns:
            matplotlib Figure object
        """
        fig, ax = plt.subplots(figsize=figsize)
        
        # Handle different shap_values formats
        if isinstance(self.shap_values, list):
            shap_values_plot = self.shap_values[0]
            expected_value = self.expected_value[0] if isinstance(self.expected_value, (list, np.ndarray)) else self.expected_value
        else:
            shap_values_plot = self.shap_values
            expected_value = self.expected_value
        
        shap.waterfall_plot(
            shap.Explanation(
                values=shap_values_plot[instance_idx],
                base_values=expected_value,
                data=self.X.iloc[instance_idx].values,
                feature_names=self.feature_names
            ),
            max_display=max_display,
            show=False
        )
        
        plt.title(f'SHAP Waterfall Plot - Instance {instance_idx}', fontsize=14, fontweight='bold', pad=20)
        plt.tight_layout()
        return fig
    
    def plot_dependence(self, feature: str, 
                       interaction_feature: Optional[str] = None,
                       figsize: Tuple[int, int] = (10, 6)) -> plt.Figure:
        """
        Create dependence plot for a feature.
        
        Args:
            feature: Feature name to plot
            interaction_feature: Feature to use for coloring
            figsize: Figure size tuple
            
        Returns:
            matplotlib Figure object
        """
        if feature not in self.feature_names:
            raise ValueError(f"Feature '{feature}' not found in feature names.")
        
        fig, ax = plt.subplots(figsize=figsize)
        
        # Handle different shap_values formats
        if isinstance(self.shap_values, list):
            shap_values_plot = self.shap_values[0]
        else:
            shap_values_plot = self.shap_values
        
        shap.dependence_plot(
            self.feature_names.index(feature),
            shap_values_plot,
            self.X,
            feature_names=self.feature_names,
            interaction_index=interaction_feature,
            show=False,
            ax=ax
        )
        plt.title(f'SHAP Dependence Plot: {feature}', fontsize=14, fontweight='bold', pad=20)
        
        plt.tight_layout()
        return fig
    
    def explain_instance(self, instance_idx: int) -> Dict:
        """
        Get explanation for a single instance.
        
        Args:
            instance_idx: Index of the instance
            
        Returns:
            Dict with keys 'instance_index', 'expected_value', 'prediction',
            and 'contributions' (per-feature records sorted by absolute SHAP value)
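        
        Example (sketch; `engine` is a hypothetical ExplainabilityEngine):
        
            info = engine.explain_instance(0)
            top = info['contributions'][0]  # largest-|SHAP| feature record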
        """
        if isinstance(self.shap_values, list):
            shap_values = self.shap_values[0][instance_idx]
            expected_value = self.expected_value[0] if isinstance(self.expected_value, (list, np.ndarray)) else self.expected_value
        else:
            shap_values = self.shap_values[instance_idx]
            expected_value = self.expected_value
        
        # Create feature contribution dataframe
        contributions = pd.DataFrame({
            'feature': self.feature_names,
            'value': self.X.iloc[instance_idx].values,
            'shap_value': shap_values,
            'abs_shap_value': np.abs(shap_values)
        }).sort_values('abs_shap_value', ascending=False)
        
        # For classifiers this sum is in the model's output space
        # (e.g. log-odds), not a calibrated probability
        prediction = expected_value + shap_values.sum()
        
        return {
            'instance_index': instance_idx,
            'expected_value': expected_value,
            'prediction': prediction,
            'contributions': contributions.to_dict('records')
        }
    
    def get_global_explanations(self) -> Dict:
        """
        Get global explanations for the model.
        
        Returns:
            Dict with global explanation metrics
        """
        importance_df = self.get_feature_importance()
        
        if isinstance(self.shap_values, list):
            mean_abs_shap = np.abs(np.array(self.shap_values)).mean()
        else:
            mean_abs_shap = np.abs(self.shap_values).mean()
        
        return {
            'top_features': importance_df.head(10).to_dict('records'),
            'feature_count': len(self.feature_names),
            'mean_shap_value': mean_abs_shap
        }


class FallbackExplainability:
    """
    Fallback explainability engine when SHAP is not available.
    Uses built-in feature importance from models.
    """
    
    def __init__(self, model, X: pd.DataFrame, feature_names: Optional[List[str]] = None):
        """
        Initialize fallback explainability.
        
        Args:
            model: Trained model
            X: Feature data
            feature_names: List of feature names
        """
        self.model = model
        self.X = X.copy() if isinstance(X, pd.DataFrame) else pd.DataFrame(X)
        self.feature_names = feature_names or self.X.columns.tolist()
    
    def get_feature_importance(self, y: Optional[np.ndarray] = None) -> pd.DataFrame:
        """
        Get feature importance from the model's built-in attributes.
        
        Args:
            y: True targets. Required only when the model exposes neither
                feature_importances_ nor coef_, since permutation importance
                cannot be computed against a dummy target.
        """
        if hasattr(self.model, 'feature_importances_'):
            importance = self.model.feature_importances_
        elif hasattr(self.model, 'coef_'):
            importance = np.abs(self.model.coef_)
            if importance.ndim > 1:
                importance = importance.mean(axis=0)
        elif y is not None:
            # Permutation importance as a last resort; needs the true targets
            from sklearn.inspection import permutation_importance
            perm_importance = permutation_importance(self.model, self.X, y,
                                                     n_repeats=5, random_state=42)
            importance = perm_importance.importances_mean
        else:
            raise ValueError(
                "Model exposes neither feature_importances_ nor coef_; "
                "pass the true targets y to compute permutation importance."
            )
        
        importance_df = pd.DataFrame({
            'feature': self.feature_names,
            'importance': importance
        }).sort_values('importance', ascending=False)
        
        return importance_df
    
    def plot_feature_importance(self, max_display: int = 15, 
                                figsize: Tuple[int, int] = (10, 8)) -> plt.Figure:
        """Create bar plot of feature importance."""
        importance_df = self.get_feature_importance().head(max_display)
        
        fig, ax = plt.subplots(figsize=figsize)
        sns.barplot(data=importance_df, y='feature', x='importance', ax=ax, palette='viridis')
        ax.set_title('Feature Importance (Model Built-in)', fontsize=14, fontweight='bold')
        ax.set_xlabel('Importance')
        ax.set_ylabel('Feature')
        plt.tight_layout()
        return fig


def create_explainer(model, X: pd.DataFrame, feature_names: Optional[List[str]] = None):
    """
    Factory function to create appropriate explainer.
    
    Args:
        model: Trained model
        X: Feature data
        feature_names: List of feature names
        
    Returns:
        ExplainabilityEngine or FallbackExplainability instance
    """
    if SHAP_AVAILABLE:
        try:
            return ExplainabilityEngine(model, X, feature_names)
        except Exception as e:
            st.warning(f"SHAP explainer failed, using fallback: {str(e)}")
            return FallbackExplainability(model, X, feature_names)
    else:
        return FallbackExplainability(model, X, feature_names)


# Streamlit display functions
def display_shap_explanations(explainer, X_sample: Optional[pd.DataFrame] = None):
    """Display SHAP explanations in Streamlit."""
    st.subheader("🔍 Model Explainability")
    
    if not SHAP_AVAILABLE:
        st.warning("SHAP is not installed. Using built-in feature importance instead.")
    
    # Feature importance
    st.write("### Feature Importance")
    fig_importance = explainer.plot_feature_importance()
    st.pyplot(fig_importance)
    
    # Summary plot (only for SHAP)
    if isinstance(explainer, ExplainabilityEngine):
        st.write("### SHAP Summary Plot")
        try:
            fig_summary = explainer.plot_summary()
            st.pyplot(fig_summary)
        except Exception as e:
            st.warning(f"Could not generate summary plot: {str(e)}")
        
        # Waterfall plot for first instance
        if X_sample is not None and len(X_sample) > 0:
            st.write("### Individual Prediction Explanation")
            try:
                fig_waterfall = explainer.plot_waterfall(instance_idx=0)
                st.pyplot(fig_waterfall)
            except Exception as e:
                st.warning(f"Could not generate waterfall plot: {str(e)}")