giuto committed on
Commit
8ed9c1d
·
1 Parent(s): a94143c

Add scripts for baseline preparation and drift detection, including Grafana dashboard configuration

Browse files
monitoring/drift/scripts/prepare_baseline.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Prepare baseline/reference data for drift detection.
3
+ This script samples representative data from the training set.
4
+ """
5
+
6
+ import pickle
7
+ import pandas as pd
8
+ import numpy as np
9
+ from pathlib import Path
10
+ from sklearn.model_selection import train_test_split
11
+
12
+ # Paths
13
+ PROJECT_ROOT = Path(__file__).parent.parent.parent.parent
14
+ BASELINE_DIR = Path(__file__).parent.parent / "baseline"
15
+ BASELINE_DIR.mkdir(parents=True, exist_ok=True)
16
+
17
+
18
def load_training_data():
    """Read the raw training dataset used to derive the baseline.

    Returns:
        pd.DataFrame: the full training set.

    Raises:
        FileNotFoundError: if the expected CSV is not on disk.
    """
    # Adjust path to your actual data layout.
    data_path = PROJECT_ROOT / "data" / "train.csv"

    if data_path.exists():
        df = pd.read_csv(data_path)
        print(f"Loaded {len(df)} training samples")
        return df

    raise FileNotFoundError(f"Training data not found at {data_path}")
29
+
30
+
31
def prepare_baseline(df, sample_size=1000, random_state=42):
    """
    Sample representative baseline data.

    Args:
        df: Training dataframe
        sample_size: Number of samples for baseline (clamped to len(df))
        random_state: Random seed for reproducibility

    Returns:
        Baseline dataframe
    """
    # Never request more rows than exist: the stratified split below raises
    # when test_size >= len(df) (empty train side), and df.sample raises when
    # n > len(df).
    n = min(sample_size, len(df))

    # Stratified sampling if you have labels, so the baseline preserves the
    # training set's class balance. Fall back to a plain sample when the
    # whole frame was requested (no split is possible then).
    if 'label' in df.columns and n < len(df):
        _, baseline_df = train_test_split(
            df,
            test_size=n,
            random_state=random_state,
            stratify=df['label']
        )
    else:
        baseline_df = df.sample(n=n, random_state=random_state)

    print(f"Sampled {len(baseline_df)} baseline samples")
    return baseline_df
56
+
57
+
58
def extract_features(df):
    """Pull the model feature matrix out of a dataframe.

    Columns named 'label', 'id' or 'timestamp' are treated as metadata and
    dropped; every remaining column is assumed to be a model feature (this
    must mirror the features the model was trained on).
    """
    metadata_cols = {'label', 'id', 'timestamp'}
    feature_columns = [col for col in df.columns if col not in metadata_cols]
    X = df[feature_columns].values

    print(f"Extracted {X.shape[1]} features from {X.shape[0]} samples")
    return X
69
+
70
+
71
def save_baseline(baseline_data, filename="reference_data.pkl"):
    """Persist the baseline feature matrix under BASELINE_DIR as a pickle."""
    baseline_path = BASELINE_DIR / filename

    # Pickle is acceptable here: we only ever reload our own artifact.
    with baseline_path.open('wb') as fh:
        pickle.dump(baseline_data, fh)

    print(f"Baseline saved to {baseline_path}")
    print(f" Shape: {baseline_data.shape}")
    print(f" Size: {baseline_path.stat().st_size / 1024:.2f} KB")
81
+
82
+
83
def main():
    """Entry point: load training data, sample a baseline, persist features."""
    banner = "=" * 60
    print(banner)
    print("Preparing Baseline Data for Drift Detection")
    print(banner)

    df = load_training_data()                             # raw training set
    baseline_df = prepare_baseline(df, sample_size=1000)  # representative sample
    X_baseline = extract_features(baseline_df)            # numeric matrix
    save_baseline(X_baseline)                             # persist for drift checks

    print("\n" + banner)
    print("Baseline preparation complete!")
    print(banner)
104
+
105
+
106
# Script entry point: only runs when executed directly, not on import.
if __name__ == "__main__":
    main()
monitoring/drift/scripts/requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ alibi-detect>=0.11.4
2
+ pandas>=2.0.0
3
+ numpy>=1.24.0
4
+ scikit-learn>=1.3.0
5
+ requests>=2.31.0
6
+ mlflow>=2.8.0
monitoring/drift/scripts/run_drift_check.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Data Drift Detection using Alibi Detect.
3
+ Detects distribution shifts between baseline and new data.
4
+ """
5
+
6
+ import pickle
7
+ import json
8
+ import requests
9
+ import numpy as np
10
+ import pandas as pd
11
+ from pathlib import Path
12
+ from datetime import datetime
13
+ from alibi_detect.cd import KSDrift
14
+ from typing import Dict, Tuple
15
+
16
+ # Configuration
17
+ PROJECT_ROOT = Path(__file__).parent.parent.parent.parent
18
+ BASELINE_DIR = Path(__file__).parent.parent / "baseline"
19
+ REPORTS_DIR = Path(__file__).parent.parent / "reports"
20
+ REPORTS_DIR.mkdir(parents=True, exist_ok=True)
21
+
22
+ PUSHGATEWAY_URL = "http://localhost:9091"
23
+ P_VALUE_THRESHOLD = 0.05 # Significance level
24
+
25
+
26
def load_baseline() -> np.ndarray:
    """Deserialize the saved reference feature matrix.

    Raises:
        FileNotFoundError: when prepare_baseline.py has not been run yet.
    """
    baseline_path = BASELINE_DIR / "reference_data.pkl"

    if not baseline_path.exists():
        raise FileNotFoundError(
            f"Baseline data not found at {baseline_path}\n"
            f"Run `python prepare_baseline.py` first!"
        )

    with baseline_path.open('rb') as fh:
        X_baseline = pickle.load(fh)

    print(f"Loaded baseline data: {X_baseline.shape}")
    return X_baseline
41
+
42
+
43
def load_new_data() -> np.ndarray:
    """
    Load new/production data to check for drift.

    In production, this would fetch from:
    - Database
    - S3 bucket
    - API logs
    - Data lake

    For now, simulate or load from file.

    Returns:
        2-D feature matrix with the same columns as the baseline.
    """
    # Option 1: Load from file
    data_path = PROJECT_ROOT / "data" / "test.csv"
    if data_path.exists():
        df = pd.read_csv(data_path)
        # Extract same features as baseline (must mirror prepare_baseline.py).
        feature_columns = [col for col in df.columns if col not in ['label', 'id', 'timestamp']]
        X_new = df[feature_columns].values[:500]  # Take up to 500 samples
        print(f"Loaded new data from file: {X_new.shape}")
        return X_new

    # Option 2: Simulate (for testing)
    print("Simulating new data (no test file found)")
    X_baseline = load_baseline()
    # Add slight Gaussian noise to simulate drift. Clamp the sample count so
    # this also works when the baseline holds fewer than 500 rows — the
    # previous hard-coded (500, d) noise shape raised a broadcast error then.
    n = min(500, X_baseline.shape[0])
    X_new = X_baseline[:n] + np.random.normal(0, 0.1, (n, X_baseline.shape[1]))
    return X_new
72
+
73
+
74
def run_drift_detection(X_baseline: np.ndarray, X_new: np.ndarray) -> Dict:
    """
    Run Kolmogorov-Smirnov drift detection.

    KSDrift runs one univariate KS test per feature with a Bonferroni
    correction across features.

    Args:
        X_baseline: Reference data, shape (n_ref, n_features)
        X_new: New data to check, shape (n_new, n_features)

    Returns:
        Drift detection results (JSON-serializable summary dict)
    """
    print("\n" + "=" * 60)
    print("Running Drift Detection (Kolmogorov-Smirnov Test)")
    print("=" * 60)

    # Initialize detector
    cd = KSDrift(
        X_baseline,
        p_val=P_VALUE_THRESHOLD,
        alternative='two-sided',
        correction='bonferroni'  # Multiple testing correction
    )

    # Run detection
    preds = cd.predict(X_new)

    # KSDrift reports one p-value / KS distance PER FEATURE as numpy arrays;
    # calling float() on a multi-element array raises TypeError. Summarize
    # with the most drift-indicative values: smallest p-value, largest distance.
    p_vals = np.atleast_1d(preds['data']['p_val'])
    distances = np.atleast_1d(preds['data']['distance'])

    results = {
        "timestamp": datetime.now().isoformat(),
        "drift_detected": int(preds['data']['is_drift']),
        "p_value": float(p_vals.min()),
        "threshold": P_VALUE_THRESHOLD,
        "distance": float(distances.max()),
        "baseline_samples": X_baseline.shape[0],
        "new_samples": X_new.shape[0],
        "num_features": X_baseline.shape[1]
    }

    # Print results
    print(f"\nResults:")
    print(f" Drift Detected: {'YES' if results['drift_detected'] else 'NO'}")
    print(f" P-Value: {results['p_value']:.6f} (threshold: {P_VALUE_THRESHOLD})")
    print(f" Distance: {results['distance']:.6f}")
    print(f" Baseline: {X_baseline.shape[0]} samples")
    print(f" New Data: {X_new.shape[0]} samples")

    return results
121
+
122
+
123
def save_report(results: Dict):
    """Write the drift results dict to a timestamped JSON report file."""
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    report_path = REPORTS_DIR / f"drift_report_{timestamp}.json"

    report_path.write_text(json.dumps(results, indent=2))

    print(f"\nReport saved to: {report_path}")
132
+
133
+
134
def push_to_prometheus(results: Dict):
    """
    Push drift metrics to Prometheus via Pushgateway.

    This allows Prometheus to scrape short-lived job metrics.
    Failures are logged, not raised: metrics export is best-effort.
    """
    # Build the exposition-format payload line by line so no Python source
    # indentation leaks into the metric lines, and guarantee a trailing
    # newline (the Pushgateway rejects payloads without one).
    lines = [
        "# TYPE drift_detected gauge",
        "# HELP drift_detected Whether data drift was detected (1=yes, 0=no)",
        f"drift_detected {results['drift_detected']}",
        "",
        "# TYPE drift_p_value gauge",
        "# HELP drift_p_value P-value from drift detection test",
        f"drift_p_value {results['p_value']}",
        "",
        "# TYPE drift_distance gauge",
        "# HELP drift_distance Statistical distance between distributions",
        f"drift_distance {results['distance']}",
        "",
        "# TYPE drift_check_timestamp gauge",
        "# HELP drift_check_timestamp Unix timestamp of last drift check",
        f"drift_check_timestamp {datetime.now().timestamp()}",
    ]
    metrics = "\n".join(lines) + "\n"

    try:
        response = requests.post(
            f"{PUSHGATEWAY_URL}/metrics/job/drift_detection/instance/hopcroft",
            data=metrics,
            headers={'Content-Type': 'text/plain'},
            timeout=10,  # don't hang the monitoring job if the gateway is down
        )
        response.raise_for_status()
        print(f"Metrics pushed to Pushgateway at {PUSHGATEWAY_URL}")
    except requests.exceptions.RequestException as e:
        print(f"Failed to push to Pushgateway: {e}")
        print(f" Make sure Pushgateway is running: docker compose ps pushgateway")
168
+
169
+
170
def main():
    """Run the full drift-check pipeline; return a process exit code.

    Exit code 1 means drift was detected OR the pipeline errored; 0 means
    the check ran and found no significant drift.
    """
    sep = "=" * 60
    print("\n" + sep)
    print("Hopcroft Data Drift Detection")
    print(sep)

    try:
        # Load reference and fresh data.
        X_baseline = load_baseline()
        X_new = load_new_data()

        # Compare distributions, persist the report, export metrics.
        results = run_drift_detection(X_baseline, X_new)
        save_report(results)
        push_to_prometheus(results)

        print("\n" + sep)
        print("Drift Detection Complete!")
        print(sep)

        if not results['drift_detected']:
            print("\nNo significant drift detected")
            return 0

        print("\nWARNING: Data drift detected!")
        print(f" P-value: {results['p_value']:.6f} < {P_VALUE_THRESHOLD}")
        return 1

    except Exception as e:
        # Broad catch is deliberate: this is the top-level CLI boundary and
        # we want a non-zero exit code plus a traceback rather than a crash.
        print(f"\nError: {e}")
        import traceback
        traceback.print_exc()
        return 1
207
+
208
+
209
if __name__ == "__main__":
    # Propagate the drift status as the process exit code (1 = drift/error).
    # `raise SystemExit(...)` rather than `exit(...)`: the `exit` builtin is a
    # site-module convenience and is absent when Python runs with -S.
    raise SystemExit(main())
monitoring/grafana/provisioning/dashboards/hopcroft_dashboard.json ADDED
@@ -0,0 +1,358 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "annotations": {
3
+ "list": [
4
+ {
5
+ "builtIn": 1,
6
+ "datasource": "-- Grafana --",
7
+ "enable": true,
8
+ "hide": true,
9
+ "iconColor": "rgba(0, 211, 255, 1)",
10
+ "name": "Annotations & Alerts",
11
+ "type": "dashboard"
12
+ }
13
+ ]
14
+ },
15
+ "editable": true,
16
+ "gnetId": null,
17
+ "graphTooltip": 1,
18
+ "id": null,
19
+ "links": [],
20
+ "panels": [
21
+ {
22
+ "datasource": "Prometheus",
23
+ "fieldConfig": {
24
+ "defaults": {
25
+ "color": {
26
+ "mode": "thresholds"
27
+ },
28
+ "mappings": [],
29
+ "thresholds": {
30
+ "mode": "absolute",
31
+ "steps": [
32
+ {
33
+ "color": "green",
34
+ "value": null
35
+ },
36
+ {
37
+ "color": "red",
38
+ "value": 80
39
+ }
40
+ ]
41
+ },
42
+ "unit": "reqps"
43
+ }
44
+ },
45
+ "gridPos": {
46
+ "h": 8,
47
+ "w": 6,
48
+ "x": 0,
49
+ "y": 0
50
+ },
51
+ "id": 1,
52
+ "options": {
53
+ "orientation": "auto",
54
+ "reduceOptions": {
55
+ "calcs": ["lastNotNull"],
56
+ "fields": "",
57
+ "values": false
58
+ },
59
+ "showThresholdLabels": false,
60
+ "showThresholdMarkers": true
61
+ },
62
+ "pluginVersion": "9.0.0",
63
+ "targets": [
64
+ {
65
+ "expr": "rate(fastapi_requests_total[1m])",
66
+ "refId": "A"
67
+ }
68
+ ],
69
+ "title": "Request Rate",
70
+ "type": "gauge",
71
+ "description": "Number of requests per second handled by the API"
72
+ },
73
+ {
74
+ "datasource": "Prometheus",
75
+ "fieldConfig": {
76
+ "defaults": {
77
+ "color": {
78
+ "mode": "palette-classic"
79
+ },
80
+ "custom": {
81
+ "axisLabel": "",
82
+ "axisPlacement": "auto",
83
+ "barAlignment": 0,
84
+ "drawStyle": "line",
85
+ "fillOpacity": 10,
86
+ "gradientMode": "none",
87
+ "hideFrom": {
88
+ "tooltip": false,
89
+ "viz": false,
90
+ "legend": false
91
+ },
92
+ "lineInterpolation": "linear",
93
+ "lineWidth": 1,
94
+ "pointSize": 5,
95
+ "scaleDistribution": {
96
+ "type": "linear"
97
+ },
98
+ "showPoints": "never",
99
+ "spanNulls": true
100
+ },
101
+ "mappings": [],
102
+ "thresholds": {
103
+ "mode": "absolute",
104
+ "steps": [
105
+ {
106
+ "color": "green",
107
+ "value": null
108
+ }
109
+ ]
110
+ },
111
+ "unit": "ms"
112
+ }
113
+ },
114
+ "gridPos": {
115
+ "h": 8,
116
+ "w": 18,
117
+ "x": 6,
118
+ "y": 0
119
+ },
120
+ "id": 2,
121
+ "options": {
122
+ "legend": {
123
+ "calcs": ["mean", "max"],
124
+ "displayMode": "table",
125
+ "placement": "right"
126
+ },
127
+ "tooltip": {
128
+ "mode": "multi"
129
+ }
130
+ },
131
+ "pluginVersion": "9.0.0",
132
+ "targets": [
133
+ {
134
+ "expr": "histogram_quantile(0.95, rate(fastapi_request_duration_seconds_bucket[5m])) * 1000",
135
+ "legendFormat": "p95",
136
+ "refId": "A"
137
+ },
138
+ {
139
+ "expr": "histogram_quantile(0.50, rate(fastapi_request_duration_seconds_bucket[5m])) * 1000",
140
+ "legendFormat": "p50 (median)",
141
+ "refId": "B"
142
+ }
143
+ ],
144
+ "title": "Request Latency (p50, p95)",
145
+ "type": "timeseries",
146
+ "description": "API response time percentiles over time"
147
+ },
148
+ {
149
+ "datasource": "Prometheus",
150
+ "fieldConfig": {
151
+ "defaults": {
152
+ "color": {
153
+ "mode": "thresholds"
154
+ },
155
+ "mappings": [
156
+ {
157
+ "options": {
158
+ "0": {
159
+ "color": "red",
160
+ "index": 1,
161
+ "text": "No Drift"
162
+ },
163
+ "1": {
164
+ "color": "green",
165
+ "index": 0,
166
+ "text": "Drift Detected"
167
+ }
168
+ },
169
+ "type": "value"
170
+ }
171
+ ],
172
+ "thresholds": {
173
+ "mode": "absolute",
174
+ "steps": [
175
+ {
176
+ "color": "green",
177
+ "value": null
178
+ }
179
+ ]
180
+ }
181
+ }
182
+ },
183
+ "gridPos": {
184
+ "h": 6,
185
+ "w": 6,
186
+ "x": 0,
187
+ "y": 8
188
+ },
189
+ "id": 3,
190
+ "options": {
191
+ "orientation": "auto",
192
+ "reduceOptions": {
193
+ "calcs": ["lastNotNull"],
194
+ "fields": "",
195
+ "values": false
196
+ },
197
+ "showThresholdLabels": false,
198
+ "showThresholdMarkers": true,
199
+ "text": {}
200
+ },
201
+ "pluginVersion": "9.0.0",
202
+ "targets": [
203
+ {
204
+ "expr": "drift_detected",
205
+ "refId": "A"
206
+ }
207
+ ],
208
+ "title": "Data Drift Status",
209
+ "type": "stat",
210
+ "description": "Current data drift detection status (1 = drift detected, 0 = no drift)"
211
+ },
212
+ {
213
+ "datasource": "Prometheus",
214
+ "fieldConfig": {
215
+ "defaults": {
216
+ "color": {
217
+ "mode": "thresholds"
218
+ },
219
+ "decimals": 4,
220
+ "mappings": [],
221
+ "thresholds": {
222
+ "mode": "absolute",
223
+ "steps": [
224
+ {
225
+ "color": "green",
226
+ "value": null
227
+ },
228
+ {
229
+ "color": "yellow",
230
+ "value": 0.01
231
+ },
232
+ {
233
+ "color": "red",
234
+ "value": 0.05
235
+ }
236
+ ]
237
+ },
238
+ "unit": "short"
239
+ }
240
+ },
241
+ "gridPos": {
242
+ "h": 6,
243
+ "w": 6,
244
+ "x": 6,
245
+ "y": 8
246
+ },
247
+ "id": 4,
248
+ "options": {
249
+ "orientation": "auto",
250
+ "reduceOptions": {
251
+ "calcs": ["lastNotNull"],
252
+ "fields": "",
253
+ "values": false
254
+ },
255
+ "showThresholdLabels": false,
256
+ "showThresholdMarkers": true,
257
+ "text": {}
258
+ },
259
+ "pluginVersion": "9.0.0",
260
+ "targets": [
261
+ {
262
+ "expr": "drift_p_value",
263
+ "refId": "A"
264
+ }
265
+ ],
266
+ "title": "Drift P-Value",
267
+ "type": "stat",
268
+ "description": "Statistical significance of detected drift (lower = more significant)"
269
+ },
270
+ {
271
+ "datasource": "Prometheus",
272
+ "fieldConfig": {
273
+ "defaults": {
274
+ "color": {
275
+ "mode": "palette-classic"
276
+ },
277
+ "custom": {
278
+ "axisLabel": "",
279
+ "axisPlacement": "auto",
280
+ "barAlignment": 0,
281
+ "drawStyle": "line",
282
+ "fillOpacity": 10,
283
+ "gradientMode": "none",
284
+ "hideFrom": {
285
+ "tooltip": false,
286
+ "viz": false,
287
+ "legend": false
288
+ },
289
+ "lineInterpolation": "linear",
290
+ "lineWidth": 1,
291
+ "pointSize": 5,
292
+ "scaleDistribution": {
293
+ "type": "linear"
294
+ },
295
+ "showPoints": "auto",
296
+ "spanNulls": false
297
+ },
298
+ "mappings": [],
299
+ "thresholds": {
300
+ "mode": "absolute",
301
+ "steps": [
302
+ {
303
+ "color": "green",
304
+ "value": null
305
+ }
306
+ ]
307
+ },
308
+ "unit": "short"
309
+ }
310
+ },
311
+ "gridPos": {
312
+ "h": 6,
313
+ "w": 12,
314
+ "x": 12,
315
+ "y": 8
316
+ },
317
+ "id": 5,
318
+ "options": {
319
+ "legend": {
320
+ "calcs": ["mean", "lastNotNull"],
321
+ "displayMode": "table",
322
+ "placement": "right"
323
+ },
324
+ "tooltip": {
325
+ "mode": "multi"
326
+ }
327
+ },
328
+ "pluginVersion": "9.0.0",
329
+ "targets": [
330
+ {
331
+ "expr": "drift_distance",
332
+ "legendFormat": "Distance",
333
+ "refId": "A"
334
+ }
335
+ ],
336
+ "title": "Drift Distance Over Time",
337
+ "type": "timeseries",
338
+ "description": "Statistical distance between baseline and current data distribution"
339
+ }
340
+ ],
341
+ "refresh": "10s",
342
+ "schemaVersion": 36,
343
+ "style": "dark",
344
+ "tags": ["hopcroft", "ml", "monitoring"],
345
+ "templating": {
346
+ "list": []
347
+ },
348
+ "time": {
349
+ "from": "now-1h",
350
+ "to": "now"
351
+ },
352
+ "timepicker": {},
353
+ "timezone": "",
354
+ "title": "Hopcroft ML Model Monitoring",
355
+ "uid": "hopcroft-ml-dashboard",
356
+ "version": 1,
357
+ "weekStart": ""
358
+ }
requirements.txt CHANGED
@@ -65,4 +65,6 @@ pytest-html
65
  pytest-json-report
66
 
67
  # GUI
68
- streamlit>=1.28.0
 
 
 
65
  pytest-json-report
66
 
67
  # GUI
68
+ streamlit>=1.28.0
69
+
70
+ alibi-detect>=0.11.4