Dyuti Dasmahapatra committed on
Commit
dd5a03c
Β·
1 Parent(s): a01dc02

Phase 2: Dashboard Integrated

Browse files
Files changed (3) hide show
  1. app.py +443 -0
  2. src/auditor.py +471 -0
  3. tests/test_advanced_features.py +140 -0
app.py CHANGED
@@ -0,0 +1,443 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
2
+
3
+ import gradio as gr
4
+ import sys
5
+ import os
6
+ import matplotlib.pyplot as plt
7
+ from PIL import Image
8
+ import numpy as np
9
+ import time
10
+ import torch
11
+
12
+ # Add src to path
13
+ sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))
14
+
15
+ from model_loader import load_model_and_processor, SUPPORTED_MODELS
16
+ from predictor import predict_image, create_prediction_plot
17
+ from explainer import explain_attention, explain_gradcam, explain_gradient_shap
18
+ from auditor import create_auditors
19
+ from utils import preprocess_image, get_top_predictions_dict
20
+
21
+ # Global variables to cache model and processor
22
+ model = None
23
+ processor = None
24
+ current_model_name = None
25
+ auditors = None
26
+
27
def load_selected_model(model_name):
    """Load *model_name* and its auditors once, caching them in module globals.

    Args:
        model_name: Hugging Face model identifier (a value of SUPPORTED_MODELS).

    Returns:
        A status string beginning with "βœ…" on success or "❌" on failure;
        callers detect errors by checking for "❌" in the result.
    """
    global model, processor, current_model_name, auditors

    try:
        if model is None or current_model_name != model_name:
            print(f"Loading model: {model_name}")
            new_model, new_processor = load_model_and_processor(model_name)
            # Build the auditors BEFORE publishing anything to the globals.
            # Previously the globals were assigned first, so a failure in
            # create_auditors() left a cached model with auditors=None and
            # every later call skipped re-initialization.
            new_auditors = create_auditors(new_model, new_processor)

            model, processor = new_model, new_processor
            auditors = new_auditors
            current_model_name = model_name
            print("βœ… Model and auditors loaded successfully!")

        return f"βœ… Model loaded: {model_name}"

    except Exception as e:
        return f"❌ Error loading model: {str(e)}"
45
+
46
def analyze_image_basic(image, model_choice, xai_method, layer_index, head_index):
    """
    Basic explainability analysis - the core function for Tab 1.

    Args:
        image: Uploaded PIL image (or None if nothing was uploaded).
        model_choice: Key into SUPPORTED_MODELS.
        xai_method: One of "Attention Visualization", "GradCAM", "GradientSHAP".
        layer_index / head_index: Attention-map selection (attention method only).

    Returns:
        (processed_image, prediction_figure, explanation_figure, status_message),
        matching the four Gradio outputs wired to this callback. The first three
        are None on any failure.
    """
    try:
        # Validate the input before paying the cost of loading a model.
        if image is None:
            return None, None, None, "⚠️ Please upload an image first."

        model_status = load_selected_model(SUPPORTED_MODELS[model_choice])
        if "❌" in model_status:
            return None, None, None, model_status

        processed_image = preprocess_image(image)

        # Top-k predictions and their bar plot.
        probs, _, labels = predict_image(processed_image, model, processor)
        pred_fig = create_prediction_plot(probs, labels)

        # Generate the explanation figure for the selected method.
        explanation_fig = None
        if xai_method == "Attention Visualization":
            explanation_fig = explain_attention(
                model, processor, processed_image,
                layer_index=layer_index, head_index=head_index
            )
        elif xai_method == "GradCAM":
            # explain_gradcam also returns an overlay image; only the figure
            # is displayed, so the second element is discarded.
            explanation_fig, _ = explain_gradcam(model, processor, processed_image)
        elif xai_method == "GradientSHAP":
            explanation_fig = explain_gradient_shap(
                model, processor, processed_image, n_samples=3
            )

        return (
            processed_image,
            pred_fig,
            explanation_fig,
            f"βœ… Analysis complete! Top prediction: {labels[0]} ({probs[0]:.2%})"
        )

    except Exception as e:
        error_msg = f"❌ Analysis failed: {str(e)}"
        print(error_msg)
        return None, None, None, error_msg
95
+
96
def analyze_counterfactual(image, model_choice, patch_size, perturbation_type):
    """
    Counterfactual analysis for Tab 2.

    Returns:
        (figure, status_message) — exactly two values, matching the two Gradio
        outputs (plot + status textbox) wired to this callback.
    """
    try:
        # Validate input before loading the model.
        if image is None:
            return None, "⚠️ Please upload an image first."

        model_status = load_selected_model(SUPPORTED_MODELS[model_choice])
        if "❌" in model_status:
            # Bug fix: this early exit previously returned THREE values while
            # the success/except paths (and the 2-output wiring) return two,
            # which made Gradio misroute the outputs on error.
            return None, model_status

        processed_image = preprocess_image(image)

        # Perform counterfactual analysis.
        results = auditors['counterfactual'].patch_perturbation_analysis(
            processed_image,
            patch_size=patch_size,
            perturbation_type=perturbation_type
        )

        # Human-readable summary for the status textbox.
        summary = (
            f"πŸ” Counterfactual Analysis Complete!\n"
            f"β€’ Avg confidence change: {results['avg_confidence_change']:.4f}\n"
            f"β€’ Prediction flip rate: {results['prediction_flip_rate']:.2%}\n"
            f"β€’ Most sensitive patch: {results['most_sensitive_patch']}"
        )

        return results['figure'], summary

    except Exception as e:
        error_msg = f"❌ Counterfactual analysis failed: {str(e)}"
        print(error_msg)
        return None, error_msg
132
+
133
def analyze_calibration(image, model_choice, n_bins):
    """
    Confidence calibration analysis for Tab 3.

    Returns:
        (figure, status_message) — exactly two values, matching the two Gradio
        outputs (plot + status textbox) wired to this callback.
    """
    try:
        # Validate input before loading the model.
        if image is None:
            return None, "⚠️ Please upload an image first."

        model_status = load_selected_model(SUPPORTED_MODELS[model_choice])
        if "❌" in model_status:
            # Bug fix: this early exit previously returned THREE values while
            # the success/except paths (and the 2-output wiring) return two.
            return None, model_status

        processed_image = preprocess_image(image)

        # For demo purposes, create a simple test set from the uploaded image.
        # In a real scenario, you'd use a proper validation set.
        test_images = [processed_image] * 10

        # Perform calibration analysis.
        results = auditors['calibration'].analyze_calibration(
            test_images, n_bins=n_bins
        )

        # Human-readable summary for the status textbox.
        metrics = results['metrics']
        summary = (
            f"πŸ“Š Calibration Analysis Complete!\n"
            f"β€’ Mean confidence: {metrics['mean_confidence']:.3f}\n"
            f"β€’ Overconfident rate: {metrics['overconfident_rate']:.2%}\n"
            f"β€’ Underconfident rate: {metrics['underconfident_rate']:.2%}"
        )

        return results['figure'], summary

    except Exception as e:
        error_msg = f"❌ Calibration analysis failed: {str(e)}"
        print(error_msg)
        return None, error_msg
172
+
173
def analyze_bias_detection(image, model_choice):
    """
    Bias detection analysis for Tab 4.

    Builds four demo "subgroups" from the uploaded image (original plus
    brightness/contrast variants) and compares model confidence across them.

    Returns:
        (figure, status_message) — exactly two values, matching the two Gradio
        outputs (plot + status textbox) wired to this callback.
    """
    try:
        # Validate input before loading the model.
        if image is None:
            return None, "⚠️ Please upload an image first."

        model_status = load_selected_model(SUPPORTED_MODELS[model_choice])
        if "❌" in model_status:
            # Bug fix: this early exit previously returned THREE values while
            # the success/except paths (and the 2-output wiring) return two.
            return None, model_status

        processed_image = preprocess_image(image)

        # Create demo subgroups based on the uploaded image.
        # In a real scenario, you'd use predefined subgroups from your dataset.
        subsets = []
        subset_names = ['Original', 'Brightness+', 'Brightness-', 'Contrast+']

        # Original image
        subsets.append([processed_image])

        # Brightness increased. Bug fix: Image.point lookup tables must hold
        # ints in 0..255 — the previous lambdas returned unclamped floats.
        bright_image = processed_image.copy().point(lambda p: min(255, int(p * 1.5)))
        subsets.append([bright_image])

        # Brightness decreased
        dark_image = processed_image.copy().point(lambda p: int(p * 0.7))
        subsets.append([dark_image])

        # Contrast increased (clamped to the valid pixel range)
        contrast_image = processed_image.copy().point(
            lambda p: max(0, min(255, int(128 + (p - 128) * 1.5)))
        )
        subsets.append([contrast_image])

        # Perform bias analysis.
        results = auditors['bias'].analyze_subgroup_performance(
            subsets, subset_names
        )

        # Human-readable summary for the status textbox.
        subgroup_metrics = results['subgroup_metrics']
        summary = f"βš–οΈ Bias Detection Complete!\nAnalyzed {len(subgroup_metrics)} subgroups:\n"
        for name, metrics in subgroup_metrics.items():
            summary += f"β€’ {name}: confidence={metrics['mean_confidence']:.3f}\n"

        return results['figure'], summary

    except Exception as e:
        error_msg = f"❌ Bias detection failed: {str(e)}"
        print(error_msg)
        return None, error_msg
226
+
227
def create_demo_image():
    """Create a 224x224 RGB demo image for first-time users.

    A dull-red background with a green and a blue square — enough structure
    for the explainers to produce non-trivial output.
    """
    img = Image.new('RGB', (224, 224), color=(150, 100, 100))

    # Paint each region with a single paste() call instead of the original
    # per-pixel putpixel() loops — identical pixels, orders of magnitude fewer
    # Python-level operations. Boxes are (left, upper, right, lower) with
    # exclusive right/lower, matching the old range(50, 150) / range(160, 200).
    img.paste((100, 200, 100), (50, 50, 150, 150))    # green square
    img.paste((100, 100, 200), (160, 160, 200, 200))  # blue square

    return img
242
+
243
# Create the Gradio interface.
# Layout: a shared model selector at the top, four analysis tabs, and a
# markdown footer. Each tab owns its own image input and wires one callback
# defined above to its plot/status outputs.
with gr.Blocks(theme=gr.themes.Soft(), title="ViT Auditing Toolkit") as demo:
    gr.Markdown(
        """
        # 🎯 ViT Auditing Toolkit
        ### An Interactive Dashboard for Model Explainability and Validation

        Upload an image or use the demo image to analyze Vision Transformer model predictions
        and explore various explanation methods.
        """
    )

    # Model selection (shared across all tabs)
    with gr.Row():
        model_choice = gr.Dropdown(
            choices=list(SUPPORTED_MODELS.keys()),
            value="ViT-Base",
            label="🎯 Select Model",
            info="Choose which Vision Transformer model to use"
        )

    load_btn = gr.Button("πŸ”„ Load Model", variant="primary")
    model_status = gr.Textbox(label="Model Status", interactive=False)

    # The dropdown holds display names; map to the HF model id before loading.
    load_btn.click(
        fn=lambda model: load_selected_model(SUPPORTED_MODELS[model]),
        inputs=[model_choice],
        outputs=[model_status]
    )

    # Tabbed interface
    with gr.Tabs():
        # Tab 1: Basic Explainability (attention / GradCAM / GradientSHAP)
        with gr.TabItem("πŸ” Basic Explainability"):
            with gr.Row():
                with gr.Column(scale=1):
                    image_input = gr.Image(
                        label="πŸ“ Upload Image",
                        type="pil",
                        value=create_demo_image()
                    )

                    with gr.Accordion("βš™οΈ Explanation Settings", open=False):
                        xai_method = gr.Dropdown(
                            choices=[
                                "Attention Visualization",
                                "GradCAM",
                                "GradientSHAP"
                            ],
                            value="Attention Visualization",
                            label="Explanation Method"
                        )

                        # Slider bounds 0..11 match the 12 layers/heads of
                        # ViT-Base; confirm if larger models are added.
                        with gr.Row():
                            layer_index = gr.Slider(
                                minimum=0, maximum=11, value=6, step=1,
                                label="Attention Layer Index"
                            )
                            head_index = gr.Slider(
                                minimum=0, maximum=11, value=0, step=1,
                                label="Attention Head Index"
                            )

                    analyze_btn = gr.Button("πŸš€ Analyze Image", variant="primary")
                    status_output = gr.Textbox(label="Status", interactive=False)

                with gr.Column(scale=2):
                    with gr.Row():
                        original_display = gr.Image(
                            label="πŸ“Έ Processed Image",
                            interactive=False
                        )
                        prediction_display = gr.Plot(
                            label="πŸ“Š Model Predictions"
                        )

                    explanation_display = gr.Plot(
                        label="πŸ” Explanation Visualization"
                    )

            # Connect the analyze button (4 outputs: image, preds, explanation, status)
            analyze_btn.click(
                fn=analyze_image_basic,
                inputs=[image_input, model_choice, xai_method, layer_index, head_index],
                outputs=[original_display, prediction_display, explanation_display, status_output]
            )

        # Tab 2: Counterfactual Analysis (patch perturbation sensitivity)
        with gr.TabItem("πŸ”„ Counterfactual Analysis"):
            with gr.Row():
                with gr.Column(scale=1):
                    cf_image_input = gr.Image(
                        label="πŸ“ Upload Image",
                        type="pil",
                        value=create_demo_image()
                    )

                    with gr.Accordion("βš™οΈ Counterfactual Settings", open=True):
                        patch_size = gr.Slider(
                            minimum=16, maximum=64, value=32, step=16,
                            label="Patch Size"
                        )
                        perturbation_type = gr.Dropdown(
                            choices=["blur", "blackout", "gray", "noise"],
                            value="blur",
                            label="Perturbation Type"
                        )

                    cf_analyze_btn = gr.Button("πŸ”„ Run Counterfactual Analysis", variant="primary")
                    cf_status_output = gr.Textbox(label="Status", interactive=False)

                with gr.Column(scale=2):
                    cf_explanation_display = gr.Plot(
                        label="πŸ”„ Counterfactual Analysis Results"
                    )

            # Two outputs: result plot + status textbox.
            cf_analyze_btn.click(
                fn=analyze_counterfactual,
                inputs=[cf_image_input, model_choice, patch_size, perturbation_type],
                outputs=[cf_explanation_display, cf_status_output]
            )

        # Tab 3: Confidence Calibration (demo test set built from one image)
        with gr.TabItem("πŸ“Š Confidence Calibration"):
            with gr.Row():
                with gr.Column(scale=1):
                    cal_image_input = gr.Image(
                        label="πŸ“ Upload Sample Image (Used to generate demo test set)",
                        type="pil",
                        value=create_demo_image()
                    )

                    with gr.Accordion("βš™οΈ Calibration Settings", open=True):
                        n_bins = gr.Slider(
                            minimum=5, maximum=20, value=10, step=1,
                            label="Number of Bins"
                        )

                    cal_analyze_btn = gr.Button("πŸ“Š Analyze Calibration", variant="primary")
                    cal_status_output = gr.Textbox(label="Status", interactive=False)

                with gr.Column(scale=2):
                    cal_explanation_display = gr.Plot(
                        label="πŸ“Š Calibration Analysis Results"
                    )

            # Two outputs: result plot + status textbox.
            cal_analyze_btn.click(
                fn=analyze_calibration,
                inputs=[cal_image_input, model_choice, n_bins],
                outputs=[cal_explanation_display, cal_status_output]
            )

        # Tab 4: Bias Detection (demo subgroups derived from one image)
        with gr.TabItem("βš–οΈ Bias Detection"):
            with gr.Row():
                with gr.Column(scale=1):
                    bias_image_input = gr.Image(
                        label="πŸ“ Upload Sample Image (Used to generate demo subgroups)",
                        type="pil",
                        value=create_demo_image()
                    )

                    bias_analyze_btn = gr.Button("βš–οΈ Detect Bias", variant="primary")
                    bias_status_output = gr.Textbox(label="Status", interactive=False)

                with gr.Column(scale=2):
                    bias_explanation_display = gr.Plot(
                        label="βš–οΈ Bias Detection Results"
                    )

            # Two outputs: result plot + status textbox.
            bias_analyze_btn.click(
                fn=analyze_bias_detection,
                inputs=[bias_image_input, model_choice],
                outputs=[bias_explanation_display, bias_status_output]
            )

    # Footer
    gr.Markdown(
        """
        ---
        ### πŸ› οΈ About This Toolkit

        This interactive dashboard provides comprehensive auditing capabilities for Vision Transformer models:

        - **πŸ” Basic Explainability**: Understand model predictions with attention maps, GradCAM, and SHAP
        - **πŸ”„ Counterfactual Analysis**: Test how predictions change with image perturbations
        - **πŸ“Š Confidence Calibration**: Evaluate if the model is properly calibrated
        - **βš–οΈ Bias Detection**: Identify performance variations across different subgroups

        Built with ❀️ using Gradio, Transformers, and Captum.
        """
    )
435
+
436
# Launch the application (script entry point; skipped when imported as a module)
if __name__ == "__main__":
    demo.launch(
        server_name="localhost",  # bind to loopback only (was "0.0.0.0"); not reachable from other hosts
        server_port=7860,
        share=False,  # do not open a public Gradio tunnel
        show_error=True  # surface Python tracebacks in the browser UI
    )
src/auditor.py ADDED
@@ -0,0 +1,471 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # src/auditor.py
2
+
3
+ import torch
4
+ import numpy as np
5
+ import matplotlib.pyplot as plt
6
+ from PIL import Image, ImageDraw, ImageFilter
7
+ import torch.nn.functional as F
8
+ from scipy import stats
9
+ from sklearn.calibration import calibration_curve
10
+ from sklearn.metrics import brier_score_loss
11
+ import pandas as pd
12
+
13
class CounterfactualAnalyzer:
    """Analyze how predictions change with image perturbations.

    The public method returns a dict containing a matplotlib figure plus
    summary statistics so callers (the Gradio dashboard) can show both.
    """

    def __init__(self, model, processor):
        self.model = model
        self.processor = processor
        # Device the model's parameters live on (kept for future use).
        self.device = next(model.parameters()).device

    def patch_perturbation_analysis(self, image, patch_size=16, perturbation_type='blur'):
        """
        Analyze how predictions change when different patches are perturbed.

        Args:
            image: PIL Image
            patch_size: Side length in pixels of the square patches to perturb
            perturbation_type: Type of perturbation ('blur', 'noise', 'blackout', 'gray')

        Returns:
            dict with keys:
                'figure': matplotlib Figure summarizing the analysis
                'patch_heatmap': (patches_y, patches_x) array of confidence deltas
                'avg_confidence_change': mean confidence delta across patches
                'prediction_flip_rate': fraction of patches whose perturbation
                    changed the top-1 label
                'most_sensitive_patch': (row, col) of the patch whose perturbation
                    reduced confidence the most

        Raises:
            ValueError: if the image is smaller than one patch in either
                dimension (there would be no patches to analyze).
        """
        original_probs, _, original_labels = self._predict_image(image)
        original_top_label = original_labels[0]
        original_confidence = original_probs[0]

        # Grid of non-overlapping patches (remainder pixels are ignored).
        width, height = image.size
        patches_x = width // patch_size
        patches_y = height // patch_size
        if patches_x == 0 or patches_y == 0:
            # Guard: np.argmin on an empty heatmap would raise a confusing error.
            raise ValueError(
                f"patch_size={patch_size} is larger than the image ({width}x{height})"
            )

        confidence_changes = []
        prediction_changes = []
        patch_heatmap = np.zeros((patches_y, patches_x))

        for i in range(patches_y):
            for j in range(patches_x):
                # Perturb one patch on a fresh copy and re-run inference.
                perturbed_img = self._perturb_patch(
                    image.copy(), j, i, patch_size, perturbation_type
                )

                perturbed_probs, _, perturbed_labels = self._predict_image(perturbed_img)
                perturbed_confidence = perturbed_probs[0]
                perturbed_label = perturbed_labels[0]

                # Negative delta => perturbing this patch hurt confidence.
                confidence_change = perturbed_confidence - original_confidence
                prediction_change = 1 if perturbed_label != original_top_label else 0

                confidence_changes.append(confidence_change)
                prediction_changes.append(prediction_change)
                patch_heatmap[i, j] = confidence_change

        fig = self._create_counterfactual_visualization(
            image, patch_heatmap, patch_size, original_top_label,
            original_confidence, confidence_changes, prediction_changes
        )

        return {
            'figure': fig,
            'patch_heatmap': patch_heatmap,
            'avg_confidence_change': np.mean(confidence_changes),
            'prediction_flip_rate': np.mean(prediction_changes),
            'most_sensitive_patch': np.unravel_index(np.argmin(patch_heatmap), patch_heatmap.shape)
        }

    def _perturb_patch(self, image, patch_x, patch_y, patch_size, perturbation_type):
        """Apply *perturbation_type* to one patch of *image* in place; return the image."""
        left = patch_x * patch_size
        upper = patch_y * patch_size
        right = left + patch_size
        lower = upper + patch_size
        patch_box = (left, upper, right, lower)

        if perturbation_type == 'blur':
            # Extract patch, blur it, and paste back.
            patch = image.crop(patch_box)
            blurred_patch = patch.filter(ImageFilter.GaussianBlur(5))
            image.paste(blurred_patch, patch_box)

        elif perturbation_type == 'blackout':
            # Black out the patch.
            draw = ImageDraw.Draw(image)
            draw.rectangle(patch_box, fill='black')

        elif perturbation_type == 'gray':
            # Convert patch to grayscale (round-trip through 'L' mode).
            patch = image.crop(patch_box)
            gray_patch = patch.convert('L').convert('RGB')
            image.paste(gray_patch, patch_box)

        elif perturbation_type == 'noise':
            # Bug fix: do the arithmetic in a signed dtype BEFORE clipping.
            # The previous code cast Gaussian noise (which can be negative)
            # straight to uint8, wrapping values modulo 256, and then added
            # two uint8 arrays (wrapping again) — so the final clip was a
            # no-op and the "noise" was structured wrap-around artifacts.
            patch = np.array(image.crop(patch_box)).astype(np.int16)
            noise = np.random.normal(0, 50, patch.shape)
            noisy_patch = np.clip(patch + noise, 0, 255).astype(np.uint8)
            image.paste(Image.fromarray(noisy_patch), patch_box)

        return image

    def _predict_image(self, image):
        """Return (probs, indices, labels) for *image* via the shared predictor."""
        from predictor import predict_image
        return predict_image(image, self.model, self.processor, top_k=5)

    def _create_counterfactual_visualization(self, image, patch_heatmap, patch_size,
                                             original_label, original_confidence,
                                             confidence_changes, prediction_changes):
        """Build the 2x2 summary figure: image+grid, heatmap, histogram, flip rate."""
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 12))

        # Original image with its prediction in the title.
        ax1.imshow(image)
        ax1.set_title(f'Original Image\nPrediction: {original_label} ({original_confidence:.2%})',
                      fontweight='bold')
        ax1.axis('off')

        # Patch sensitivity heatmap (symmetric color scale around zero).
        im = ax2.imshow(patch_heatmap, cmap='RdBu_r', vmin=-0.5, vmax=0.5)
        ax2.set_title('Patch Sensitivity Heatmap\n(Confidence Change When Perturbed)',
                      fontweight='bold')
        ax2.set_xlabel('Patch X')
        ax2.set_ylabel('Patch Y')
        plt.colorbar(im, ax=ax2, label='Confidence Change')

        # Overlay the patch grid on the original image.
        for i in range(patch_heatmap.shape[0]):
            for j in range(patch_heatmap.shape[1]):
                rect = plt.Rectangle((j * patch_size, i * patch_size),
                                     patch_size, patch_size,
                                     linewidth=1, edgecolor='red',
                                     facecolor='none', alpha=0.3)
                ax1.add_patch(rect)

        # Confidence change distribution.
        ax3.hist(confidence_changes, bins=20, alpha=0.7, color='skyblue')
        ax3.axvline(0, color='red', linestyle='--', label='No Change')
        ax3.set_xlabel('Confidence Change')
        ax3.set_ylabel('Frequency')
        ax3.set_title('Distribution of Confidence Changes', fontweight='bold')
        ax3.legend()
        ax3.grid(alpha=0.3)

        # Prediction flip analysis.
        flip_rate = np.mean(prediction_changes)
        ax4.bar(['No Flip', 'Flip'], [1 - flip_rate, flip_rate], color=['green', 'red'])
        ax4.set_ylabel('Proportion')
        ax4.set_title(f'Prediction Flip Rate: {flip_rate:.2%}', fontweight='bold')
        ax4.grid(alpha=0.3)

        plt.tight_layout()
        return fig
171
+
172
class ConfidenceCalibrationAnalyzer:
    """Analyze model calibration and confidence metrics."""

    def __init__(self, model, processor):
        self.model = model
        self.processor = processor
        # Device the model's parameters live on (kept for future use).
        self.device = next(model.parameters()).device

    def analyze_calibration(self, test_images, test_labels=None, n_bins=10):
        """
        Analyze model calibration using confidence scores.

        Args:
            test_images: List of PIL Images for testing
            test_labels: Optional true labels (same order as test_images) for
                accuracy-based metrics (reliability diagram, ECE, Brier score)
            n_bins: Number of bins for the calibration curve

        Returns:
            dict with keys 'figure' (matplotlib Figure), 'metrics' (see
            _calculate_calibration_metrics) and 'confidence_distribution'
            (array of top-1 confidences).
        """
        all_probs = []          # full top-k probability vector per image
        predictions = []        # top-1 label per image
        max_confidences = []    # top-1 probability per image

        for img in test_images:
            probs, _, labels = self._predict_image(img)
            max_confidences.append(probs[0])
            predictions.append(labels[0])
            all_probs.append(np.asarray(probs))

        max_confidences = np.array(max_confidences)

        # Bug fix: the full probability vectors are now forwarded to the
        # visualization so the top-1-vs-rest confidence-gap panel can render.
        # Previously only the scalar max confidences reached it, so its
        # isinstance(..., np.ndarray) check always failed (dead code).
        fig = self._create_calibration_visualization(
            max_confidences, test_labels, predictions, n_bins, all_probs
        )

        calibration_metrics = self._calculate_calibration_metrics(
            max_confidences, test_labels, predictions
        )

        return {
            'figure': fig,
            'metrics': calibration_metrics,
            'confidence_distribution': max_confidences
        }

    def _predict_image(self, image):
        """Return (probs, indices, labels) for *image* via the shared predictor."""
        from predictor import predict_image
        return predict_image(image, self.model, self.processor, top_k=5)

    def _create_calibration_visualization(self, confidences, true_labels, predictions,
                                          n_bins, all_probs=None):
        """Build the 2x2 calibration figure.

        Panels 2 and 3 require ground truth and are skipped when *true_labels*
        is None; panel 4 requires the per-image probability vectors
        (*all_probs*, defaulted for backward compatibility) and is skipped
        when they are absent.
        """
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 12))

        # Panel 1: distribution of top-1 confidences.
        ax1.hist(confidences, bins=20, alpha=0.7, color='lightblue', edgecolor='black')
        ax1.set_xlabel('Confidence Score')
        ax1.set_ylabel('Frequency')
        ax1.set_title('Distribution of Confidence Scores', fontweight='bold')
        ax1.axvline(np.mean(confidences), color='red', linestyle='--',
                    label=f'Mean: {np.mean(confidences):.3f}')
        ax1.legend()
        ax1.grid(alpha=0.3)

        # Panel 2: reliability diagram + ECE (needs ground truth).
        if true_labels is not None:
            # Binary correctness of each top-1 prediction.
            correct = np.array([pred == true for pred, true in zip(predictions, true_labels)])

            fraction_of_positives, mean_predicted_prob = calibration_curve(
                correct, confidences, n_bins=n_bins, strategy='uniform'
            )

            ax2.plot(mean_predicted_prob, fraction_of_positives, "s-", label='Model')
            ax2.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
            ax2.set_xlabel('Mean Predicted Probability')
            ax2.set_ylabel('Fraction of Positives')
            ax2.set_title('Reliability Diagram', fontweight='bold')
            ax2.legend()
            ax2.grid(alpha=0.3)

            # Expected Calibration Error: confidence-weighted |acc - conf| per bin.
            bin_edges = np.linspace(0, 1, n_bins + 1)
            bin_indices = np.digitize(confidences, bin_edges) - 1
            bin_indices = np.clip(bin_indices, 0, n_bins - 1)

            ece = 0
            for bin_idx in range(n_bins):
                mask = bin_indices == bin_idx
                if np.sum(mask) > 0:
                    bin_conf = np.mean(confidences[mask])
                    bin_acc = np.mean(correct[mask])
                    ece += (np.sum(mask) / len(confidences)) * np.abs(bin_acc - bin_conf)

            ax2.text(0.1, 0.9, f'ECE: {ece:.3f}', transform=ax2.transAxes,
                     bbox=dict(boxstyle="round,pad=0.3", facecolor="yellow", alpha=0.7))

        # Panel 3: binned confidence vs accuracy (needs ground truth).
        if true_labels is not None:
            confidence_bins = np.linspace(0, 1, n_bins + 1)
            bin_accuracies = []
            bin_confidences = []

            for i in range(n_bins):
                mask = (confidences >= confidence_bins[i]) & (confidences < confidence_bins[i + 1])
                if np.sum(mask) > 0:
                    bin_accuracies.append(np.mean(correct[mask]))
                    bin_confidences.append(np.mean(confidences[mask]))

            ax3.plot(bin_confidences, bin_accuracies, 'o-', label='Model')
            ax3.plot([0, 1], [0, 1], 'k--', label='Ideal')
            ax3.set_xlabel('Average Confidence')
            ax3.set_ylabel('Average Accuracy')
            ax3.set_title('Confidence vs Accuracy', fontweight='bold')
            ax3.legend()
            ax3.grid(alpha=0.3)

        # Panel 4: gap between top-1 confidence and the mean of the rest of
        # the top-5 mass (needs the full per-image probability vectors).
        if all_probs and isinstance(all_probs[0], np.ndarray):
            top1_conf = [c[0] for c in all_probs]
            top5_conf = [np.sum(c[:5]) for c in all_probs]
            confidence_gap = [t1 - (t5 - t1) / 4 for t1, t5 in zip(top1_conf, top5_conf)]

            ax4.hist(confidence_gap, bins=20, alpha=0.7, color='lightgreen', edgecolor='black')
            ax4.set_xlabel('Confidence Gap (Top-1 vs Rest)')
            ax4.set_ylabel('Frequency')
            ax4.set_title('Distribution of Confidence Gaps', fontweight='bold')
            ax4.grid(alpha=0.3)

        plt.tight_layout()
        return fig

    def _calculate_calibration_metrics(self, confidences, true_labels, predictions):
        """Return summary calibration metrics as plain Python floats.

        Always includes mean/std confidence and over/underconfidence rates
        (fixed 0.8 / 0.2 thresholds); adds accuracy, confidence gap and Brier
        score when *true_labels* is provided.
        """
        metrics = {
            'mean_confidence': float(np.mean(confidences)),
            'confidence_std': float(np.std(confidences)),
            'overconfident_rate': float(np.mean(confidences > 0.8)),
            'underconfident_rate': float(np.mean(confidences < 0.2)),
        }

        if true_labels is not None:
            correct = np.array([pred == true for pred, true in zip(predictions, true_labels)])
            accuracy = np.mean(correct)
            avg_confidence = np.mean(confidences)

            metrics.update({
                'accuracy': float(accuracy),
                # Positive gap => overconfident on average.
                'confidence_gap': float(avg_confidence - accuracy),
                'brier_score': float(brier_score_loss(correct, confidences))
            })

        return metrics
331
+
332
class BiasDetector:
    """Detect potential biases in model performance across subgroups."""

    def __init__(self, model, processor):
        self.model = model
        self.processor = processor
        # Device the model's parameters live on (kept for future use).
        self.device = next(model.parameters()).device

    def analyze_subgroup_performance(self, image_subsets, subset_names, true_labels_subsets=None):
        """
        Analyze performance across different subgroups.

        Args:
            image_subsets: List of image lists, one list per subgroup
            subset_names: Display name for each subgroup (same order)
            true_labels_subsets: Optional list of true-label lists per subgroup

        Returns:
            dict with 'figure' (matplotlib Figure), 'subgroup_metrics'
            (per-subgroup confidence/size/accuracy stats) and
            'fairness_metrics' (cross-subgroup disparities; empty unless
            accuracy is available for at least two subgroups).
        """
        subgroup_metrics = {}

        for i, (subset, name) in enumerate(zip(image_subsets, subset_names)):
            confidences = []
            predictions = []

            for img in subset:
                probs, _, labels = self._predict_image(img)
                confidences.append(probs[0])
                predictions.append(labels[0])

            # Cast numpy scalars to plain floats for consistency with the
            # calibration analyzer's metrics (JSON-friendly values).
            metrics = {
                'mean_confidence': float(np.mean(confidences)),
                'confidence_std': float(np.std(confidences)),
                'sample_size': len(subset)
            }

            # Accuracy is only computable when ground truth was supplied.
            if true_labels_subsets is not None and i < len(true_labels_subsets):
                true_labels = true_labels_subsets[i]
                correct = [pred == true for pred, true in zip(predictions, true_labels)]
                metrics['accuracy'] = float(np.mean(correct))
                metrics['error_rate'] = 1 - metrics['accuracy']

            subgroup_metrics[name] = metrics

        fig = self._create_bias_visualization(subgroup_metrics, true_labels_subsets is not None)
        fairness_metrics = self._calculate_fairness_metrics(subgroup_metrics)

        return {
            'figure': fig,
            'subgroup_metrics': subgroup_metrics,
            'fairness_metrics': fairness_metrics
        }

    def _predict_image(self, image):
        """Return (probs, indices, labels) for *image* via the shared predictor."""
        from predictor import predict_image
        return predict_image(image, self.model, self.processor, top_k=5)

    def _create_bias_visualization(self, subgroup_metrics, has_accuracy):
        """Build the per-subgroup bar charts (confidence, size, optional accuracy)."""
        if has_accuracy:
            fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18, 5))
        else:
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))

        subgroups = list(subgroup_metrics.keys())

        # Mean confidence by subgroup.
        confidences = [metrics['mean_confidence'] for metrics in subgroup_metrics.values()]
        ax1.bar(subgroups, confidences, color='lightblue', alpha=0.7)
        ax1.set_ylabel('Mean Confidence')
        ax1.set_title('Mean Confidence by Subgroup', fontweight='bold')
        ax1.tick_params(axis='x', rotation=45)
        ax1.grid(axis='y', alpha=0.3)

        # Value labels above each bar.
        for i, v in enumerate(confidences):
            ax1.text(i, v + 0.01, f'{v:.3f}', ha='center', va='bottom')

        # Sample sizes by subgroup.
        sample_sizes = [metrics['sample_size'] for metrics in subgroup_metrics.values()]
        ax2.bar(subgroups, sample_sizes, color='lightgreen', alpha=0.7)
        ax2.set_ylabel('Sample Size')
        ax2.set_title('Sample Size by Subgroup', fontweight='bold')
        ax2.tick_params(axis='x', rotation=45)
        ax2.grid(axis='y', alpha=0.3)

        for i, v in enumerate(sample_sizes):
            ax2.text(i, v + max(sample_sizes) * 0.01, f'{v}', ha='center', va='bottom')

        # Accuracy by subgroup (only when ground truth was supplied).
        if has_accuracy:
            accuracies = [metrics.get('accuracy', 0) for metrics in subgroup_metrics.values()]
            ax3.bar(subgroups, accuracies, color='lightcoral', alpha=0.7)
            ax3.set_ylabel('Accuracy')
            ax3.set_title('Accuracy by Subgroup', fontweight='bold')
            ax3.tick_params(axis='x', rotation=45)
            ax3.grid(axis='y', alpha=0.3)

            for i, v in enumerate(accuracies):
                ax3.text(i, v + 0.01, f'{v:.3f}', ha='center', va='bottom')

        plt.tight_layout()
        return fig

    def _calculate_fairness_metrics(self, subgroup_metrics):
        """Return cross-subgroup disparity metrics.

        Empty dict unless every subgroup has an 'accuracy' entry and there are
        at least two subgroups to compare.
        """
        fairness_metrics = {}

        has_accuracy = all('accuracy' in metrics for metrics in subgroup_metrics.values())

        if has_accuracy and len(subgroup_metrics) >= 2:
            accuracies = [metrics['accuracy'] for metrics in subgroup_metrics.values()]
            confidences = [metrics['mean_confidence'] for metrics in subgroup_metrics.values()]

            fairness_metrics = {
                'accuracy_range': float(max(accuracies) - min(accuracies)),
                'accuracy_std': float(np.std(accuracies)),
                'confidence_range': float(max(confidences) - min(confidences)),
                # Ratio of best to worst subgroup accuracy; inf when the
                # worst subgroup has zero accuracy.
                'max_accuracy_disparity': float(max(accuracies) / min(accuracies) if min(accuracies) > 0 else float('inf')),
            }

        return fairness_metrics
463
+
464
# Convenience factory for the full auditing suite
def create_auditors(model, processor):
    """Instantiate one analyzer of each kind, keyed by analysis type."""
    analyzer_classes = {
        'counterfactual': CounterfactualAnalyzer,
        'calibration': ConfidenceCalibrationAnalyzer,
        'bias': BiasDetector,
    }
    return {name: cls(model, processor) for name, cls in analyzer_classes.items()}
tests/test_advanced_features.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # test_advanced_features.py
2
+
3
+ import sys
4
+ import os
5
+ sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))
6
+
7
+ from model_loader import load_model_and_processor
8
+ from auditor import create_auditors, CounterfactualAnalyzer, ConfidenceCalibrationAnalyzer, BiasDetector
9
+ from PIL import Image
10
+ import matplotlib.pyplot as plt
11
+ import numpy as np
12
+
13
def create_test_subsets():
    """Create dummy test subsets for the bias detection demo.

    Builds four subgroups of 10 synthetic 224x224 RGB images each: three
    solid-color subgroups and one whose fill varies per image.

    Returns:
        tuple (subsets, subset_names) where subsets is a list of lists of
        PIL Images aligned with subset_names.
    """
    subset_names = ['Red Dominant', 'Green Dominant', 'Blue Dominant', 'Mixed Colors']
    # Fixed fill colors for the single-color subgroups.
    solid_fills = {
        'Red Dominant': (200, 50, 50),
        'Green Dominant': (50, 200, 50),
        'Blue Dominant': (50, 50, 200),
    }

    subsets = []
    for name in subset_names:
        images = []
        for j in range(10):  # 10 images per subset
            if name in solid_fills:
                fill = solid_fills[name]
            else:  # 'Mixed Colors': vary the shade per image index
                fill = (50 + j * 20, 100 + j * 10, 150 - j * 15)
            images.append(Image.new('RGB', (224, 224), color=fill))
        subsets.append(images)

    return subsets, subset_names
35
+
36
def test_advanced_features():
    """
    Smoke-test the advanced auditing features end to end.

    Loads the model, builds the three auditors (counterfactual,
    calibration, bias), runs each on synthetic images, displays the
    resulting matplotlib figures, and prints all metrics.

    Returns:
        bool: True when every analysis completes without raising,
        False otherwise (the traceback is printed).
    """
    print("πŸ”¬ Testing Advanced Auditing Features")
    print("=" * 50)

    try:
        # Load model
        model, processor = load_model_and_processor()

        # Create auditors
        auditors = create_auditors(model, processor)
        print("βœ… Auditors created: Counterfactual, Calibration, Bias Detection")

        # Create test image: reddish background with a green centered
        # square, so patch perturbation has a salient region to locate.
        test_image = Image.new('RGB', (224, 224), color=(150, 100, 100))
        for x in range(50, 150):
            for y in range(50, 150):
                test_image.putpixel((x, y), (100, 200, 100))

        print("\n1. Testing Counterfactual Analysis...")
        counterfactual_results = auditors['counterfactual'].patch_perturbation_analysis(
            test_image, patch_size=32, perturbation_type='blur'
        )
        print("   βœ… Counterfactual analysis completed")
        print(f"   πŸ“Š Avg confidence change: {counterfactual_results['avg_confidence_change']:.4f}")
        print(f"   πŸ”€ Prediction flip rate: {counterfactual_results['prediction_flip_rate']:.2%}")

        print("\n2. Testing Confidence Calibration...")
        # Create dummy test set
        test_images = [test_image] * 5  # Simple test with same image
        calibration_results = auditors['calibration'].analyze_calibration(test_images)
        print("   βœ… Calibration analysis completed")
        print(f"   πŸ“ˆ Mean confidence: {calibration_results['metrics']['mean_confidence']:.3f}")
        print(f"   🎯 Overconfident rate: {calibration_results['metrics']['overconfident_rate']:.2%}")

        print("\n3. Testing Bias Detection...")
        test_subsets, subset_names = create_test_subsets()
        bias_results = auditors['bias'].analyze_subgroup_performance(test_subsets, subset_names)
        print("   βœ… Bias detection analysis completed")
        print(f"   πŸ“Š Analyzed {len(subset_names)} subgroups")

        # Display results
        print("\nπŸ“Š DISPLAYING ADVANCED ANALYSIS RESULTS:")
        print("=" * 40)

        # Counterfactual results — re-activate each analyzer's figure by
        # number so the suptitle lands on it before plt.show().
        plt.figure(counterfactual_results['figure'].number)
        plt.suptitle("1. Counterfactual Analysis - Patch Sensitivity", fontweight='bold', y=0.98)
        plt.show()

        # Calibration results
        plt.figure(calibration_results['figure'].number)
        plt.suptitle("2. Confidence Calibration Analysis", fontweight='bold', y=0.98)
        plt.show()

        # Bias detection results
        plt.figure(bias_results['figure'].number)
        plt.suptitle("3. Bias Detection - Subgroup Analysis", fontweight='bold', y=0.98)
        plt.show()

        # Print detailed metrics
        print("\nπŸ“ˆ DETAILED METRICS:")
        print("-" * 20)

        print("\n🎯 Counterfactual Analysis:")
        for key, value in counterfactual_results.items():
            if key != 'figure':  # the figure object is displayed above, not printed
                print(f"   {key}: {value}")

        print("\nπŸ“Š Calibration Analysis:")
        for key, value in calibration_results['metrics'].items():
            print(f"   {key}: {value}")

        print("\nβš–οΈ Bias Detection:")
        print("   Subgroup Metrics:")
        for subgroup, metrics in bias_results['subgroup_metrics'].items():
            print(f"     {subgroup}:")
            for metric, value in metrics.items():
                print(f"       {metric}: {value}")

        print("\nπŸŽ‰ ADVANCED FEATURES SUMMARY:")
        print("=" * 35)
        print("βœ… Counterfactual Analysis - Patch Sensitivity")
        print("οΏ½οΏ½οΏ½ Confidence Calibration - Reliability Analysis")
        print("βœ… Bias Detection - Subgroup Performance")
        print("βœ… All advanced auditing features working!")

        return True

    except Exception as e:
        # Broad catch is deliberate: this is a demo/smoke script that should
        # report failure rather than crash.
        print(f"❌ Advanced features test failed: {e}")
        import traceback
        traceback.print_exc()
        return False
132
+
133
if __name__ == "__main__":
    # Run the full smoke test and report overall status.
    if test_advanced_features():
        print("\nπŸš€ All Phase 1 + Advanced Features Complete!")
        print("   Ready for Phase 2: Dashboard Integration!")
    else:
        print("\n⚠️ Some advanced features need debugging")