from __future__ import annotations

import io
import json
import os
import traceback
from contextlib import redirect_stdout
from datetime import datetime
from typing import Any, Dict, List

import gradio as gr
import pandas as pd
import regex as re2
import re
from langchain_cohere import ChatCohere  # noqa: F401
from settings import (
    GENERAL_CONVERSATION_PROMPT,
    COHERE_MODEL_PRIMARY,
    COHERE_TIMEOUT_S,   # noqa: F401
    USE_OPEN_FALLBACKS  # noqa: F401
)
# Try to import optional HIPAA flags; fall back to safe defaults if not defined.
try:
    from settings import PHI_MODE, PERSIST_HISTORY, HISTORY_TTL_DAYS, REDACT_BEFORE_LLM, ALLOW_EXTERNAL_PHI
except Exception:
    PHI_MODE = False
    PERSIST_HISTORY = True
    HISTORY_TTL_DAYS = 365
    REDACT_BEFORE_LLM = False
    ALLOW_EXTERNAL_PHI = True

from audit_log import log_event
from privacy import safety_filter, refusal_reply
from llm_router import cohere_chat, _co_client, cohere_embed


# ---------------------- Helpers ----------------------

def load_markdown_text(filepath: str) -> str:
    try:
        with open(filepath, "r", encoding="utf-8") as f:
            return f.read()
    except FileNotFoundError:
        return f"**Error:** Document `{os.path.basename(filepath)}` not found."


def _sanitize_text(s: str) -> str:
    """Strip Unicode control characters (other than newline and tab)."""
    if not isinstance(s, str):
        return s
    # Character-class set subtraction ("--") requires the regex module's
    # VERSION1 behaviour; without the flag the pattern is misparsed.
    return re2.sub(r"[\p{C}--[\n\t]]+", "", s, flags=re2.V1)


# Conservative PHI redaction patterns
PHI_PATTERNS = [
    (re.compile(r"\b\d{3}-\d{2}-\d{4}\b"), "[REDACTED_SSN]"),
    (re.compile(r"\b\d{9}\b"), "[REDACTED_MRN]"),
    (re.compile(r"\b\d{3}[-.\s]?\d{3}[-.\s]?\d{4}\b"), "[REDACTED_PHONE]"),
    (re.compile(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}"), "[REDACTED_EMAIL]"),
    (re.compile(r"\b(19|20)\d{2}-\d{2}-\d{2}\b"), "[REDACTED_DOB]"),
    (re.compile(r"\b\d{2}/\d{2}/(19|20)\d{2}\b"), "[REDACTED_DOB]"),
    (re.compile(r"\b\d{5}(-\d{4})?\b"), "[REDACTED_ZIP]"),
]


def redact_phi(text: str) -> str:
    if not isinstance(text, str):
        return text
    t = text
    for pat, repl in PHI_PATTERNS:
        t = pat.sub(repl, t)
    return t
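
# Illustrative sketch (synthetic values) of how redact_phi behaves; the sample
# SSN and email below are invented for demonstration:
#
#   redact_phi("SSN 123-45-6789, reach me at jane@example.com")
#   -> 'SSN [REDACTED_SSN], reach me at [REDACTED_EMAIL]'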


# ---------------------- Date Shifting Protocol ----------------------

import random
from datetime import timedelta

# Session-consistent date shift: a random whole-week offset, fixed within a session
_DATE_SHIFT_OFFSET: int | None = None


def _get_date_shift_offset() -> int:
    """
    Returns a consistent date shift offset for the current session.
    This ensures all dates in a single analysis are shifted by the same amount,
    preserving relative time relationships while protecting absolute dates.
    """
    global _DATE_SHIFT_OFFSET
    if _DATE_SHIFT_OFFSET is None:
        # 5-52 whole weeks (35-364 days); week multiples preserve day-of-week,
        # which shift_date() documents as part of its contract.
        _DATE_SHIFT_OFFSET = random.randint(5, 52) * 7
    return _DATE_SHIFT_OFFSET


def reset_date_shift_offset() -> None:
    """Reset the date shift offset for a new session."""
    global _DATE_SHIFT_OFFSET
    _DATE_SHIFT_OFFSET = None


def shift_date(date_obj, offset_days: int | None = None):
    """
    Shift a date by the session offset to protect PHI.
    Preserves day-of-week and relative relationships.
    
    Args:
        date_obj: datetime.date or datetime.datetime object
        offset_days: Optional specific offset; uses session offset if None
        
    Returns:
        Shifted date object of same type
    """
    if offset_days is None:
        offset_days = _get_date_shift_offset()
    return date_obj - timedelta(days=offset_days)
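
# Minimal sketch: the offset is fixed per session, so intervals between
# shifted dates equal the intervals between the originals.
#
#   from datetime import date
#   a = shift_date(date(2024, 3, 15))
#   b = shift_date(date(2024, 3, 22))
#   (b - a).days  # -> 7, same as the unshifted interval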


def shift_dates_in_dataframe(df: pd.DataFrame, date_columns: List[str] | None = None) -> pd.DataFrame:
    """
    Apply date shifting to specified columns in a DataFrame.
    
    Implements the ClarityOps date-shifting protocol:
    - Shifts all dates by a consistent offset within a session
    - Preserves relative time relationships between records
    - Protects absolute date PHI while maintaining analytical validity
    
    Args:
        df: DataFrame to process
        date_columns: List of column names containing dates. If None, auto-detects.
        
    Returns:
        DataFrame with shifted dates
    """
    df_copy = df.copy()
    offset = _get_date_shift_offset()
    
    # Auto-detect date columns if not specified
    if date_columns is None:
        date_columns = []
        for col in df_copy.columns:
            if df_copy[col].dtype == 'datetime64[ns]':
                date_columns.append(col)
            elif df_copy[col].dtype == 'object':
                # Check if column contains date-like strings
                sample = df_copy[col].dropna().head(10)
                if len(sample) > 0:
                    try:
                        pd.to_datetime(sample, errors='raise')
                        date_columns.append(col)
                    except (ValueError, TypeError):
                        pass
    
    # Apply date shifting
    for col in date_columns:
        if col in df_copy.columns:
            try:
                # Convert to datetime if needed
                if df_copy[col].dtype != 'datetime64[ns]':
                    df_copy[col] = pd.to_datetime(df_copy[col], errors='coerce')
                # Shift dates
                df_copy[col] = df_copy[col] - pd.Timedelta(days=offset)
            except Exception:
                pass  # Skip columns that can't be converted
    
    return df_copy
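
# Minimal sketch with a synthetic frame: 'admit_date' parses as datetime and is
# auto-detected, so the whole column moves back by the session offset, while
# 'zone' fails datetime parsing during auto-detection and is left untouched.
#
#   df = pd.DataFrame({"admit_date": ["2024-01-01", "2024-01-08"], "zone": ["A", "B"]})
#   shifted = shift_dates_in_dataframe(df)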


# ---------------------- Minimum Cell Size Enforcement ----------------------

# Minimum cell size threshold for aggregated outputs (prevents re-identification)
MIN_CELL_SIZE = 5  # Industry standard: suppress cells with fewer than 5 records


class CellSizeSuppressor:
    """
    Enforces minimum cell sizes in aggregated outputs to prevent re-identification.
    
    Implements the ClarityOps privacy protection:
    - Suppresses aggregate values derived from fewer than MIN_CELL_SIZE records
    - Replaces suppressed values with a "[SUPPRESSED: n<{min_size}]" marker
    - Logs suppression events for audit trail
    """
    
    def __init__(self, min_size: int = MIN_CELL_SIZE):
        self.min_size = min_size
        self.suppression_count = 0
        self.suppressed_cells = []
    
    def check_and_suppress(self, value: Any, count: int, context: str = "") -> Any:
        """
        Check if a value should be suppressed based on its underlying count.
        
        Args:
            value: The aggregate value (mean, sum, etc.)
            count: Number of records underlying this value
            context: Description of what this value represents (for logging)
            
        Returns:
            Original value if count >= min_size, otherwise suppression marker
        """
        if count < self.min_size:
            self.suppression_count += 1
            self.suppressed_cells.append({
                "context": context,
                "count": count,
                "threshold": self.min_size
            })
            return f"[SUPPRESSED: n<{self.min_size}]"
        return value
    
    def suppress_small_groups_in_dataframe(
        self, 
        df: pd.DataFrame, 
        count_column: str,
        value_columns: List[str],
        group_description: str = "group"
    ) -> pd.DataFrame:
        """
        Suppress values in a DataFrame where the count column is below threshold.
        
        Args:
            df: DataFrame with aggregated data
            count_column: Name of column containing record counts
            value_columns: Columns whose values should be suppressed if count is low
            group_description: Description for logging
            
        Returns:
            DataFrame with small-cell values suppressed
        """
        df_copy = df.copy()
        
        for idx, row in df_copy.iterrows():
            count = row.get(count_column, self.min_size)
            if pd.notna(count) and count < self.min_size:
                for col in value_columns:
                    if col in df_copy.columns:
                        original_value = df_copy.at[idx, col]
                        df_copy.at[idx, col] = f"[SUPPRESSED: n<{self.min_size}]"
                        self.suppression_count += 1
                        self.suppressed_cells.append({
                            "context": f"{group_description} at index {idx}, column {col}",
                            "original_count": count,
                            "threshold": self.min_size
                        })
        
        return df_copy
    
    def get_suppression_report(self) -> Dict[str, Any]:
        """Generate a report of all suppressions applied."""
        return {
            "total_suppressions": self.suppression_count,
            "min_cell_size_threshold": self.min_size,
            "suppressed_cells": self.suppressed_cells
        }
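
# Minimal sketch of scalar suppression (synthetic numbers):
#
#   s = CellSizeSuppressor()
#   s.check_and_suppress(41.5, count=3, context="avg_wait for Zone A")
#   -> '[SUPPRESSED: n<5]'
#   s.check_and_suppress(38.2, count=12, context="avg_wait for Zone B")
#   -> 38.2
#   s.get_suppression_report()["total_suppressions"]  # -> 1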


def enforce_minimum_cell_size(
    output_data: Dict[str, Any], 
    count_key_patterns: List[str] | None = None
) -> tuple[Dict[str, Any], Dict[str, Any]]:
    """
    Scan output data for small cell sizes and suppress as needed.
    
    This is applied to the validated JSON output before report generation
    to ensure no re-identifiable small-group statistics are exposed.
    
    Args:
        output_data: The validated JSON output dictionary
        count_key_patterns: Keys that indicate count fields (default: common patterns)
        
    Returns:
        Tuple of (processed_data, suppression_report)
    """
    if count_key_patterns is None:
        count_key_patterns = ['count', 'n', 'num', 'total', 'records', 'sample_size', 'volume']
    
    suppressor = CellSizeSuppressor(MIN_CELL_SIZE)
    processed = _recursive_cell_size_check(output_data, count_key_patterns, suppressor)
    
    report = suppressor.get_suppression_report()
    if report["total_suppressions"] > 0:
        safe_log("cell_size_suppression", report)
    
    return processed, report
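
# Minimal sketch on a nested structure (synthetic numbers): the dict carrying
# "count": 3 has its other numeric values suppressed; the count itself and the
# large-enough group are left intact.
#
#   data = {"by_zone": [{"zone": "A", "count": 3, "avg_wait": 41.5},
#                       {"zone": "B", "count": 12, "avg_wait": 38.2}]}
#   processed, report = enforce_minimum_cell_size(data)
#   processed["by_zone"][0]["avg_wait"]  # -> '[SUPPRESSED: n<5]'
#   processed["by_zone"][1]["avg_wait"]  # -> 38.2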


def _recursive_cell_size_check(
    data: Any, 
    count_patterns: List[str], 
    suppressor: CellSizeSuppressor,
    path: str = ""
) -> Any:
    """Recursively check and suppress small cells in nested data structures."""
    
    if isinstance(data, dict):
        # Look for count fields in this dict
        count_value = None
        for key in data.keys():
            if any(pattern in key.lower() for pattern in count_patterns):
                try:
                    count_value = int(data[key])
                    break
                except (ValueError, TypeError):
                    pass
        
        # If we found a small count, suppress numeric values in this dict
        result = {}
        for key, value in data.items():
            new_path = f"{path}.{key}" if path else key
            
            if count_value is not None and count_value < MIN_CELL_SIZE:
                # Suppress numeric values (but not the count itself or identifiers)
                if isinstance(value, (int, float)) and not any(p in key.lower() for p in count_patterns):
                    result[key] = suppressor.check_and_suppress(value, count_value, new_path)
                else:
                    result[key] = _recursive_cell_size_check(value, count_patterns, suppressor, new_path)
            else:
                result[key] = _recursive_cell_size_check(value, count_patterns, suppressor, new_path)
        
        return result
    
    elif isinstance(data, list):
        return [
            _recursive_cell_size_check(item, count_patterns, suppressor, f"{path}[{i}]")
            for i, item in enumerate(data)
        ]
    
    else:
        return data


def safe_log(event_name: str, meta: dict | None = None):
    try:
        meta = (meta or {}).copy()
        meta.pop("raw", None)
        log_event(event_name, None, meta)
    except Exception:
        pass


# ---------------------- Audit Trail ----------------------

import hashlib
from datetime import datetime as dt


def _hash_content(content: str) -> str:
    """Generate a short hash for content identification without storing full content."""
    return hashlib.sha256(content.encode('utf-8')).hexdigest()[:16]


def _safe_truncate(text: str, max_length: int = 500) -> str:
    """Safely truncate text for logging without exposing sensitive data."""
    if not text or len(text) <= max_length:
        return text
    return text[:max_length] + f"... [truncated, {len(text)} total chars]"


def log_analysis_start(user_prompt: str, filenames: List[str], schema_summary: List[Dict[str, Any]]) -> str:
    """
    Log the start of an analysis session.
    
    Captures data lineage: what files were uploaded and their schemas.
    Returns a session_id for correlating subsequent log entries.
    """
    session_id = dt.now().strftime("%Y%m%d_%H%M%S_") + _hash_content(user_prompt)[:8]
    
    # Build schema summary without sensitive data
    schema_log = []
    for schema in schema_summary:
        schema_log.append({
            "filename": schema.get("filename"),
            "rows": schema.get("rows"),
            "columns": schema.get("columns"),
            "column_names": schema.get("column_names"),
            "dtypes": schema.get("dtypes"),
        })
    
    safe_log("analysis_session_start", {
        "session_id": session_id,
        "prompt_hash": _hash_content(user_prompt),
        "prompt_length": len(user_prompt),
        "file_count": len(filenames),
        "filenames": filenames,
        "schemas": schema_log,
        "timestamp": dt.now().isoformat(),
    })
    
    return session_id


def log_code_generation(session_id: str, generated_code: str) -> None:
    """
    Log the generated analysis code.
    
    Captures code execution logs for traceability.
    Every finding can be traced back to specific lines of generated Python code.
    """
    # Parse code to extract key operations for the log
    code_operations = []
    if "groupby" in generated_code:
        code_operations.append("groupby")
    if "merge" in generated_code or "join" in generated_code:
        code_operations.append("merge/join")
    if "pivot" in generated_code:
        code_operations.append("pivot")
    if "agg" in generated_code or "aggregate" in generated_code:
        code_operations.append("aggregate")
    if "sort" in generated_code:
        code_operations.append("sort")
    if "filter" in generated_code or ".loc[" in generated_code or ".query(" in generated_code:
        code_operations.append("filter")
    if "mean(" in generated_code or "sum(" in generated_code or "count(" in generated_code:
        code_operations.append("statistics")
    
    safe_log("code_generation", {
        "session_id": session_id,
        "code_hash": _hash_content(generated_code),
        "code_length": len(generated_code),
        "code_lines": generated_code.count('\n') + 1,
        "operations_detected": code_operations,
        "timestamp": dt.now().isoformat(),
    })


def log_code_execution(session_id: str, success: bool, output_size: int, error: str | None = None) -> None:
    """
    Log the result of code execution.
    
    Captures execution status and output metadata.
    """
    safe_log("code_execution", {
        "session_id": session_id,
        "success": success,
        "output_size_bytes": output_size,
        "error": _safe_truncate(error) if error else None,
        "timestamp": dt.now().isoformat(),
    })


def log_analysis_complete(
    session_id: str,
    validated_output_keys: List[str],
    report_length: int,
    total_duration_ms: float | None = None
) -> None:
    """
    Log successful completion of analysis.
    
    Captures analytical provenance: what was produced and output structure.
    """
    safe_log("analysis_session_complete", {
        "session_id": session_id,
        "output_keys": validated_output_keys,
        "output_key_count": len(validated_output_keys),
        "report_length": report_length,
        "duration_ms": total_duration_ms,
        "timestamp": dt.now().isoformat(),
    })


def log_analysis_error(session_id: str, error_type: str, error_message: str) -> None:
    """
    Log analysis failure.
    
    Captures error information for debugging without exposing sensitive data.
    """
    safe_log("analysis_session_error", {
        "session_id": session_id,
        "error_type": error_type,
        "error_message": _safe_truncate(error_message),
        "timestamp": dt.now().isoformat(),
    })


# ---------------------- JSON Validation ----------------------

class JSONValidationError(Exception):
    """Raised when script output fails JSON validation."""
    pass


def validate_json_output(raw_output: str) -> Dict[str, Any]:
    """
    Validates and parses JSON output from the analysis script.
    Creates the hard boundary between calculation and communication.
    """
    cleaned_output = raw_output.strip()
    
    if not cleaned_output:
        raise JSONValidationError(
            "Analysis script produced no output. The script must print a JSON object to stdout."
        )
    
    # Handle multiple JSON objects via naive brace matching and keep the last
    # complete one (braces inside string literals are not accounted for)
    json_candidates = []
    brace_count = 0
    current_start = None
    
    for i, char in enumerate(cleaned_output):
        if char == '{':
            if brace_count == 0:
                current_start = i
            brace_count += 1
        elif char == '}':
            brace_count -= 1
            if brace_count == 0 and current_start is not None:
                json_candidates.append(cleaned_output[current_start:i+1])
                current_start = None
    
    if not json_candidates:
        json_to_parse = cleaned_output
    else:
        json_to_parse = json_candidates[-1]
    
    try:
        parsed = json.loads(json_to_parse)
    except json.JSONDecodeError as e:
        error_context = cleaned_output[:500] + ("..." if len(cleaned_output) > 500 else "")
        raise JSONValidationError(
            f"Analysis script produced invalid JSON. Parse error: {e.msg} at position {e.pos}.\n\n"
            f"Raw output (first 500 chars):\n```\n{error_context}\n```"
        )
    
    if not isinstance(parsed, dict):
        raise JSONValidationError(
            f"Analysis output must be a JSON object (dictionary), not {type(parsed).__name__}. "
            f"Ensure your script prints a dictionary with json.dumps()."
        )
    
    if "error" in parsed:
        error_msg = parsed.get("error", "Unknown error")
        raise JSONValidationError(f"Analysis script reported an error: {error_msg}")
    
    if not parsed:
        raise JSONValidationError(
            "Analysis script produced an empty JSON object. "
            "Ensure your script populates the output dictionary with findings."
        )
    
    safe_log("json_validation_success", {"keys": list(parsed.keys()), "key_count": len(parsed)})
    return parsed
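
# Minimal sketch: stray debug output before the final JSON object is ignored
# because the last balanced {...} block wins.
#
#   validate_json_output('checking...\n{"partial": 1}\n{"wait_days": 50}')
#   -> {'wait_days': 50}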


def format_validated_json_for_report(validated_data: Dict[str, Any]) -> str:
    """Formats validated JSON data for the report generator."""
    try:
        return json.dumps(validated_data, indent=2, default=str, ensure_ascii=False)
    except (TypeError, ValueError) as e:
        safe_log("json_format_warning", {"error": str(e)})
        return json.dumps({"raw_data": str(validated_data)}, indent=2)


# ---------------------- Schema Validation ----------------------

class SchemaValidationError(Exception):
    """Raised when input data fails schema validation."""
    pass


def validate_dataframe_schema(df: pd.DataFrame, filename: str) -> Dict[str, Any]:
    """
    Validates a DataFrame's schema before analysis.
    
    Implements the ClarityOps requirement:
    "Schema validation examines column names, data types, and value ranges 
    before analysis begins. The system rejects malformed inputs."
    
    Args:
        df: The DataFrame to validate
        filename: Original filename for error messages
        
    Returns:
        Dict containing schema metadata for logging
        
    Raises:
        SchemaValidationError: If the DataFrame fails validation
    """
    errors = []
    warnings = []
    
    # Check 1: DataFrame is not empty
    if df.empty:
        raise SchemaValidationError(f"File '{filename}' contains no data (empty DataFrame).")
    
    # Check 2: Has at least one column
    if len(df.columns) == 0:
        raise SchemaValidationError(f"File '{filename}' has no columns.")
    
    # Check 3: Column names are valid (not empty, no duplicates)
    col_names = list(df.columns)
    
    # Check for empty column names
    empty_cols = [i for i, c in enumerate(col_names) if str(c).strip() == "" or pd.isna(c)]
    if empty_cols:
        errors.append(f"Empty column names at positions: {empty_cols}")
    
    # Check for duplicate column names
    seen = {}
    duplicates = []
    for col in col_names:
        col_str = str(col)
        if col_str in seen:
            duplicates.append(col_str)
        seen[col_str] = True
    if duplicates:
        errors.append(f"Duplicate column names: {list(set(duplicates))}")
    
    # Check 4: Data types are recognizable (skip if duplicates found)
    has_duplicates = len(duplicates) > 0
    if not has_duplicates:
        for col in df.columns:
            dtype = df[col].dtype
            if dtype == object:
                # Check if object column has mixed types that could cause issues
                sample = df[col].dropna().head(100)
                if len(sample) > 0:
                    types_in_col = set(type(x).__name__ for x in sample)
                    if len(types_in_col) > 2:  # Allow str + one other type
                        warnings.append(f"Column '{col}' has mixed types: {types_in_col}")
    
    # Check 5: Reasonable row count (warn if very large)
    if len(df) > 1_000_000:
        warnings.append(f"Large dataset ({len(df):,} rows) may impact performance.")
    
    # Check 6: Check for completely null columns (skip if duplicates found)
    if not has_duplicates:
        null_cols = [col for col in df.columns if df[col].isna().all()]
        if null_cols:
            warnings.append(f"Columns with all null values: {null_cols}")
    
    # Check 7: Validate numeric columns have reasonable ranges (skip if duplicates found)
    if not has_duplicates:
        import numpy as np
        for col in df.select_dtypes(include=['number']).columns:
            col_data = df[col].dropna()
            if len(col_data) > 0:
                if np.isinf(col_data).any():
                    errors.append(f"Column '{col}' contains infinite values.")
    
    # If there are critical errors, reject the input
    if errors:
        error_msg = f"Schema validation failed for '{filename}':\n" + "\n".join(f"  - {e}" for e in errors)
        raise SchemaValidationError(error_msg)
    
    # Build schema metadata
    schema_info = {
        "filename": filename,
        "rows": len(df),
        "columns": len(df.columns),
        "column_names": col_names,
        "dtypes": {str(col): str(df[col].dtype) for col in df.columns},
        "null_counts": {str(col): int(df[col].isna().sum()) for col in df.columns},
        "warnings": warnings,
    }
    
    # Log warnings but don't fail
    if warnings:
        safe_log("schema_validation_warnings", {"filename": filename, "warnings": warnings})
    
    safe_log("schema_validation_passed", {"filename": filename, "rows": len(df), "columns": len(df.columns)})
    
    return schema_info
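
# Minimal sketch: a frame with duplicate column names is rejected outright.
#
#   bad = pd.DataFrame([[1, 2]], columns=["id", "id"])
#   validate_dataframe_schema(bad, "dupes.csv")
#   -> raises SchemaValidationError ("Duplicate column names: ['id']")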


def validate_all_dataframes(dataframes: List[pd.DataFrame], filenames: List[str]) -> List[Dict[str, Any]]:
    """
    Validates all uploaded DataFrames.
    
    Args:
        dataframes: List of DataFrames to validate
        filenames: Corresponding filenames
        
    Returns:
        List of schema metadata dicts
        
    Raises:
        SchemaValidationError: If any DataFrame fails validation
    """
    schema_infos = []
    all_errors = []
    
    for df, filename in zip(dataframes, filenames):
        try:
            schema_info = validate_dataframe_schema(df, filename)
            schema_infos.append(schema_info)
        except SchemaValidationError as e:
            all_errors.append(str(e))
    
    if all_errors:
        raise SchemaValidationError("\n\n".join(all_errors))
    
    return schema_infos


# ---------------------- Sandbox Execution ----------------------

class SandboxViolationError(Exception):
    """Raised when generated code attempts forbidden operations."""
    pass


# Restricted import function that only allows safe modules
_ALLOWED_MODULES = frozenset({
    "json", "math", "statistics", "collections", "itertools", "functools",
    "operator", "string", "re", "datetime", "decimal", "fractions",
    "random", "copy", "types", "typing", "dataclasses", "enum",
    "numpy", "pandas", "scipy.stats",
})

_BLOCKED_MODULES = frozenset({
    "os", "sys", "subprocess", "shutil", "pathlib", "glob",
    "socket", "http", "urllib", "requests", "ftplib", "smtplib",
    "pickle", "shelve", "marshal", "importlib", "builtins",
    "ctypes", "multiprocessing", "threading", "asyncio",
    "eval", "exec", "compile", "open", "file", "input",
    "code", "codeop", "pty", "tty", "termios", "resource",
    "signal", "mmap", "sysconfig", "platform",
})


def _safe_import(name: str, globals_dict=None, locals_dict=None, fromlist=(), level=0):
    """Restricted import that only allows whitelisted modules."""
    import builtins as _builtins
    
    base_module = name.split('.')[0]
    
    if base_module in _BLOCKED_MODULES or name in _BLOCKED_MODULES:
        raise SandboxViolationError(f"Import of '{name}' is not allowed in sandbox environment.")
    
    if base_module not in _ALLOWED_MODULES and name not in _ALLOWED_MODULES:
        raise SandboxViolationError(f"Import of '{name}' is not allowed. Allowed modules: {', '.join(sorted(_ALLOWED_MODULES))}")
    
    return _builtins.__import__(name, globals_dict, locals_dict, fromlist, level)
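
# Minimal sketch of the import gate:
#
#   _safe_import("statistics")  # allowed -> returns the module
#   _safe_import("subprocess")  # blocked -> raises SandboxViolationError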


def _create_sandbox_builtins() -> Dict[str, Any]:
    """
    Creates a restricted builtins dict that prevents dangerous operations.
    Allows safe operations needed for data analysis.
    """
    import builtins
    
    # Safe builtins for data analysis
    safe_builtins = {
        # Types and constructors
        "bool": builtins.bool,
        "int": builtins.int,
        "float": builtins.float,
        "str": builtins.str,
        "list": builtins.list,
        "dict": builtins.dict,
        "tuple": builtins.tuple,
        "set": builtins.set,
        "frozenset": builtins.frozenset,
        "bytes": builtins.bytes,
        "bytearray": builtins.bytearray,
        "complex": builtins.complex,
        "slice": builtins.slice,
        "type": builtins.type,
        "object": builtins.object,
        
        # Iteration and sequences
        "range": builtins.range,
        "enumerate": builtins.enumerate,
        "zip": builtins.zip,
        "map": builtins.map,
        "filter": builtins.filter,
        "reversed": builtins.reversed,
        "sorted": builtins.sorted,
        "iter": builtins.iter,
        "next": builtins.next,
        "len": builtins.len,
        
        # Math and comparison
        "abs": builtins.abs,
        "min": builtins.min,
        "max": builtins.max,
        "sum": builtins.sum,
        "pow": builtins.pow,
        "round": builtins.round,
        "divmod": builtins.divmod,
        
        # Logic and identity
        "all": builtins.all,
        "any": builtins.any,
        "isinstance": builtins.isinstance,
        "issubclass": builtins.issubclass,
        "id": builtins.id,
        "hash": builtins.hash,
        
        # String and representation
        "repr": builtins.repr,
        "ascii": builtins.ascii,
        "chr": builtins.chr,
        "ord": builtins.ord,
        "format": builtins.format,
        "print": builtins.print,
        
        # Attribute access
        "getattr": builtins.getattr,
        "setattr": builtins.setattr,
        "hasattr": builtins.hasattr,
        "delattr": builtins.delattr,
        
        # Other safe operations
        "callable": builtins.callable,
        "dir": builtins.dir,
        "vars": builtins.vars,
        "locals": builtins.locals,
        "globals": lambda: {},  # Return empty dict to prevent access to real globals
        
        # Exceptions (needed for error handling in scripts)
        "Exception": builtins.Exception,
        "ValueError": builtins.ValueError,
        "TypeError": builtins.TypeError,
        "KeyError": builtins.KeyError,
        "IndexError": builtins.IndexError,
        "AttributeError": builtins.AttributeError,
        "ZeroDivisionError": builtins.ZeroDivisionError,
        "StopIteration": builtins.StopIteration,
        "RuntimeError": builtins.RuntimeError,
        
        # Constants
        "None": None,
        "True": True,
        "False": False,
        "Ellipsis": builtins.Ellipsis,
        "NotImplemented": builtins.NotImplemented,
        
        # Restricted import
        "__import__": _safe_import,
        "__name__": "__sandbox__",
        "__doc__": None,
    }
    
    return safe_builtins


def _create_sandbox_namespace(dataframes: List[Any]) -> Dict[str, Any]:
    """
    Creates a sandboxed execution namespace with only safe operations.
    
    This implements the ClarityOps security model:
    - Memory-only execution (no file I/O)
    - No network access
    - No system calls
    - Only data analysis libraries available
    """
    import numpy as np
    
    sandbox_builtins = _create_sandbox_builtins()
    
    namespace = {
        "__builtins__": sandbox_builtins,
        # Pre-loaded safe modules
        "dfs": dataframes,
        "pd": pd,
        "np": np,
        "re": re,
        "json": json,
        # Common pandas/numpy items for convenience
        "DataFrame": pd.DataFrame,
        "Series": pd.Series,
        "NaN": np.nan,
        "nan": np.nan,
    }
    
    return namespace


def execute_in_sandbox(script: str, dataframes: List[Any]) -> str:
    """
    Executes the analysis script in a sandboxed environment.
    
    Returns the captured stdout output.
    
    Raises:
        SandboxViolationError: If script attempts forbidden operations
        Exception: For other execution errors
    """
    # Pre-execution safety checks on the script text
    forbidden_patterns = [
        (r'\bopen\s*\(', "File operations (open) are not allowed"),
        (r'\bos\s*\.', "OS module access is not allowed"),
        (r'\bsys\s*\.', "Sys module access is not allowed"),
        (r'\bsubprocess', "Subprocess module is not allowed"),
        (r'\bsocket\s*\.', "Network operations are not allowed"),
        (r'\burllib', "Network operations are not allowed"),
        (r'\brequests\s*\.', "Network operations are not allowed"),
        (r'\bhttp\s*\.', "Network operations are not allowed"),
        (r'\beval\s*\(', "eval() is not allowed"),
        (r'\bexec\s*\(', "exec() is not allowed"),
        (r'\bcompile\s*\(', "compile() is not allowed"),
        (r'\b__import__\s*\(', "Direct __import__ calls are not allowed"),
        (r'\bimportlib', "importlib is not allowed"),
        (r'\bpickle', "pickle module is not allowed"),
        (r'\bshutil', "shutil module is not allowed"),
        (r'\bglobals\s*\(\s*\)', "globals() access is restricted"),
        (r'\.to_csv\s*\(', "Writing files (to_csv) is not allowed"),
        (r'\.to_excel\s*\(', "Writing files (to_excel) is not allowed"),
        (r'\.to_parquet\s*\(', "Writing files (to_parquet) is not allowed"),
        (r'\.to_sql\s*\(', "Database operations (to_sql) are not allowed"),
        (r'pd\.read_', "Reading files is not allowed - use the provided dfs variable"),
    ]
    
    for pattern, message in forbidden_patterns:
        if re.search(pattern, script):
            raise SandboxViolationError(f"Security violation: {message}")
    
    # Create sandboxed namespace
    namespace = _create_sandbox_namespace(dataframes)
    
    # Capture stdout
    output_buffer = io.StringIO()
    
    try:
        with redirect_stdout(output_buffer):
            exec(script, namespace, namespace)
        return output_buffer.getvalue()
    except SandboxViolationError:
        raise
    except Exception as e:
        # Re-raise with context but don't expose internal details
        raise RuntimeError(f"Script execution error: {type(e).__name__}: {e}")
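
# Minimal sketch of the sandbox contract: scripts see the preloaded `dfs` list
# and print JSON; file reads such as pd.read_csv are rejected before execution.
#
#   out = execute_in_sandbox(
#       "import json\nprint(json.dumps({'rows': int(dfs[0].shape[0])}))",
#       [pd.DataFrame({"x": [1, 2, 3]})],
#   )
#   out  # -> '{"rows": 3}\n'
#
#   execute_in_sandbox("df = pd.read_csv('data.csv')", [])
#   -> raises SandboxViolationError (reading files is not allowed)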


# ---------------------- Analysis Script Generation ----------------------

def _create_python_script(user_scenario: str, schema_context: str) -> str:
    EXPERT_ANALYTICAL_GUIDELINES = """
--- EXPERT ANALYTICAL GUIDELINES ---
When writing your script, you MUST follow these expert analytical principles:

**DATA INTEGRATION & LINKING:**
1.  When linking datasets, identify the correct join keys by examining column names and values. Never assume column names match across datasets.
2.  If a required column doesn't exist in a dataset, derive it from related data or clearly note its absence in the output.
3.  **DATA RECENCY IS CRITICAL:** Always prefer the most recent data when multiple time periods exist. If you have both 2013 and 2021 data, use 2021 data as the PRIMARY factor in any ranking or prioritization. Older data should only supplement, not override, recent data.

**AGGREGATION & GROUPING:**
4.  When asked about "specialties," "categories," or "types," group by the broadest categorical column first (e.g., 'Specialty' not 'Procedure').
5.  When asked about specific items, use the most granular level (e.g., specific procedures, individual facilities).
6.  Always verify the appropriate level of aggregation matches the user's question.

**PRIORITIZATION & RANKING:**
7.  To prioritize locations/facilities, create a composite score using: (a) most recent population/membership data as PRIMARY weight (60-70%), (b) health risk indicators as SECONDARY weight (30-40%). Recent data reflects current reality better than historical data.
8.  When ranking, consider both absolute values AND relative performance against benchmarks (provincial/national averages).
9.  Include sample sizes/record counts alongside rankings to indicate statistical reliability.

**CALCULATIONS & ESTIMATES:**
10. For time-based capacity calculations, use standard assumptions: 60 working days per 3-month period, 5 days/week, unless data specifies otherwise.
11. For cost calculations, always separate and sum component costs (startup + ongoing + variable) before multiplying by volume.
12. When extracting numeric values from text fields, use robust parsing: strip currency symbols, handle ranges (take midpoint), convert percentages.

**UNITS & VALIDATION:**
13. Preserve and label units correctly: percentages (%), currency (CAD/USD), time (days/weeks), clinical measures (mmHg for BP, % for A1c, kg/m² for BMI).
14. Validate calculated values against reasonable ranges (e.g., A1c typically 4-14%, BP typically 60-200 mmHg).
15. Flag outliers or unexpected values in the output for human review.

**OUTPUT COMPLETENESS:**
16. For each evaluation question, ensure the JSON output contains all data needed to answer it fully.
17. Include both raw values AND calculated metrics (averages, percentages, rankings).
18. When comparing to benchmarks, include both the benchmark value and the comparison result.
"""
    prompt_for_coder = f"""\
You are an expert Python data scientist. Your job is to write a script to extract the data needed to answer the user's request.
You have dataframes in a list `dfs`.

{EXPERT_ANALYTICAL_GUIDELINES}

--- DATA SCHEMA ---
{schema_context}
--- END DATA SCHEMA ---

CRITICAL RULES:
1.  **DO NOT READ FILES:** You MUST NOT include `pd.read_csv`. The data is ALREADY loaded in the `dfs` variable. You MUST use this variable. Failure to do so will cause a fatal error.
2.  **JSON OUTPUT ONLY:** Your script's ONLY output must be a single JSON object printed to stdout containing the raw data findings.
3.  **BE PRECISE:** Use ONLY the exact column names shown in the schema. NEVER guess or invent column names. If the schema shows columns like 'Indicator' and 'Value', do NOT try to access columns like 'Startup cost per client' directly - instead filter rows where Indicator matches the desired label.
4.  **JSON SERIALIZATION:** For DataFrame-to-dict conversion, use `json.loads(df.to_json(orient='records'))` which handles type conversion automatically. For single numeric values, use `.item()`. Avoid manual type conversion loops which are error-prone.
5.  **SINGLE JSON OUTPUT:** Print exactly ONE JSON object at the end of your script. Do not print debug statements or multiple JSON objects.
6.  **VALID JSON STRUCTURE:** The output MUST be a dictionary/object, not an array or primitive value.
7.  **SAFE DATA JOINING:** When joining/merging dataframes or looking up values across dataframes, ALWAYS check if matches exist before accessing with `.iloc[0]`. Use `.merge()` with `how='left'` or check `len(filtered_df) > 0` before accessing rows. Never assume keys will match exactly between dataframes.
8.  **CONSISTENT COLUMN NAMES:** After aggregation or renaming, always use the NEW column names in subsequent operations. Track renamed columns carefully - if you rename 'Zone' to 'zone', use 'zone' everywhere after.
9.  **SAFE ITERATION:** When iterating over mixed data structures, always check types before accessing attributes. Not all list items are dicts (some may be strings), not all values have `.items()`.
10. **KEY-VALUE DATA PATTERN:** Many healthcare datasets use key-value format (e.g., columns: 'Indicator'/'Value' or 'Metric'/'Amount'). To extract a specific value, filter rows by the key column, then access the value column: `df.loc[df['Indicator'] == 'Cost per client', 'Value'].iloc[0]`
11. **CONVERT STRINGS BEFORE MATH:** Always clean and convert strings to float/int BEFORE performing arithmetic. Use `re.sub(r'[^\\d.]', '', value)` to strip currency symbols ($), percentage signs (%), commas, and other non-numeric characters. For ranges like "8–10", split first, clean each part, convert to float, then calculate: `parts = text.split('–'); avg = (float(re.sub(r'[^\\d.]', '', parts[0])) + float(re.sub(r'[^\\d.]', '', parts[1]))) / 2`
12. **SCALAR VS VECTORIZED:** When applying a cleaning function to DataFrame columns, use `.apply()` for element-wise operations: `df['col'].apply(clean_func)`. Do NOT pass a Series to a function expecting a single value. For a single extracted value, use `.iloc[0]` to get the scalar before processing.
13. **COLUMN AVAILABILITY AFTER MERGE:** After merging DataFrames, only columns explicitly included in the merge will be available. If you need 'city' later, include it in the initial selection: `df[['facility_name', 'city', 'latitude']]`. Before accessing any column, verify it exists in the DataFrame or was included in the merge.
14. **BROADCASTING SCALAR TO ALL ROWS:** To add a single value to all rows in a DataFrame, do NOT use merge. Instead, extract the scalar first, then assign directly: `value = other_df.loc[condition, 'col'].iloc[0]; df['new_col'] = value`. This broadcasts the scalar to all rows.
15. **EXTRACT OPERATIONAL PARAMETERS FROM DATA:** For capacity, rates, or throughput values, ALWAYS extract them directly from the data rather than assuming values. Print/log the extracted value to verify it matches expectations. For ranges like "8–10", use the LOWER bound for conservative estimates in capacity planning, not the midpoint or upper bound.

--- USER'S SCENARIO ---
{user_scenario}

--- PYTHON SCRIPT ---
Now, write the complete Python script that performs the analysis and prints a single, serializable JSON object.
```python
"""
    generated_text = cohere_chat(prompt_for_coder)
    match = re2.search(r"```python\n(.*?)```", generated_text, re2.DOTALL)
    if match:
        return match.group(1).strip()
    return "print(json.dumps({'error': 'Failed to generate a valid Python script.'}))"


def _generate_long_report(prompt: str) -> str:
    try:
        client = _co_client()
        if not client:
            return "Error: Cohere client not initialized."
        response = client.chat(
            model=COHERE_MODEL_PRIMARY,
            message=prompt,
            max_tokens=4096,
        )
        return response.text
    except Exception as e:
        safe_log("cohere_chat_error", {"err": str(e)})
        return f"Error during final report generation: {e}"


def _generate_final_report(user_scenario: str, validated_json_str: str) -> str:
    prompt_for_writer = f"""\
You are an expert healthcare management consultant and data analyst.
A data science script has run to extract key findings. You have the user's original request and the validated JSON data.

Your task is to synthesize these validated findings into a single, comprehensive, and professional report that directly answers all of the user's questions with detailed justifications.

--- USER'S ORIGINAL SCENARIO & DELIVERABLES ---
{user_scenario}
--- END SCENARIO ---

--- VALIDATED DATA FINDINGS (JSON) ---
{validated_json_str}
--- END VALIDATED DATA ---

--- ANALYTICAL INTERPRETATION GUIDELINES ---
When writing your report, follow these principles:

**ACCURACY & UNITS:**
- Report numerical values with appropriate precision (1-2 decimal places for percentages, whole numbers for counts).
- Always include correct units: % for percentages, days for wait times, $ for costs, mmHg for blood pressure, % for A1c, kg/m² for BMI.
- Verify that values make clinical/operational sense before reporting (e.g., A1c should be 4-14%, not measured in mmHg).

**CONTEXT & BENCHMARKS:**
- Compare findings against relevant benchmarks (provincial averages, national standards, historical baselines).
- Explain what "good" vs "poor" performance means in context.
- Quantify differences (e.g., "50 days above average" not just "higher than average").

**CAUSATION & INTERPRETATION:**
- Distinguish correlation from causation; avoid overstating causal claims.
- Consider confounding factors (case complexity, patient demographics, resource constraints).
- Acknowledge data limitations and uncertainty.

**RECOMMENDATIONS:**
- Make recommendations specific, actionable, and tied directly to the data findings.
- Prioritize recommendations by impact and feasibility.
- Include implementation considerations (resources needed, timeline, risks).
- Suggest metrics for monitoring success.

**COMPLETENESS:**
- Address EVERY evaluation question explicitly.
- If data is insufficient to answer a question fully, state what's missing and provide the best available answer.
- Cross-reference related findings to provide a coherent narrative.

Now, write the final, polished report. The report MUST:
1.  Follow the "Expected Output Format" requested by the user.
2.  Use tables, bullet points, and DETAILED narrative justifications for each recommendation.
3.  Synthesize the validated data into actionable insights. Do not just copy the raw numbers; interpret them.
4.  Ensure you fully address ALL evaluation questions, especially the final recommendations.
5.  Verify all units and values are clinically/operationally plausible before including them.
"""
    return _generate_long_report(prompt_for_writer)


def _append_msg(h: List[Dict[str, str]], r: str, c: str) -> List[Dict[str, str]]:
    return (h or []) + [{"role": r, "content": c}]


def ping_cohere() -> str:
    try:
        cli = _co_client()
        if not cli:
            return "Cohere client not initialized."
        vecs = cohere_embed(["hello", "world"])
        return f"Cohere OK ✅ (model={COHERE_MODEL_PRIMARY})" if vecs else "Cohere reachable."
    except Exception as e:
        return f"Cohere ping failed: {e}"


def handle(user_msg: str, files: list, yield_update) -> str:
    try:
        safe_in, blocked_in, reason_in = safety_filter(user_msg, mode="input")
        if blocked_in:
            return refusal_reply(reason_in)

        redacted_in = safe_in
        if PHI_MODE and REDACT_BEFORE_LLM:
            redacted_in = redact_phi(safe_in)

        file_paths: List[str] = [getattr(f, "name", None) or f for f in (files or [])]

        if file_paths:
            dataframes, schema_parts, filenames = [], [], []
            for i, p in enumerate(file_paths):
                if p.lower().endswith(".csv"):
                    try:
                        df = pd.read_csv(p)
                    except UnicodeDecodeError:
                        df = pd.read_csv(p, encoding="latin1")
                    dataframes.append(df)
                    filenames.append(os.path.basename(p))
                    schema_parts.append(
                        f"DataFrame `dfs[{i}]` (`{os.path.basename(p)}`):\n{df.head().to_markdown()}\n"
                    )

            if not dataframes:
                return "Please upload at least one CSV file."

            # Schema Validation - examines column names, data types, and value ranges
            yield_update("```\n🔎 Validating input schema...\n```")
            try:
                schema_infos = validate_all_dataframes(dataframes, filenames)
            except SchemaValidationError as e:
                safe_log("schema_validation_failed", {"error": str(e)})
                return f"**Schema Validation Failed**\n\n{e}\n\nPlease fix the data issues and re-upload."

            # PHI Protection: Apply date shifting if PHI mode is enabled
            if PHI_MODE:
                yield_update("```\n🔒 Applying PHI protections (date shifting)...\n```")
                reset_date_shift_offset()  # New session = new offset
                dataframes = [shift_dates_in_dataframe(df) for df in dataframes]
                safe_log("date_shifting_applied", {
                    "offset_days": _get_date_shift_offset(),
                    "dataframes_processed": len(dataframes)
                })

            # Start audit trail session
            import time as _time
            _start_time = _time.time()
            session_id = log_analysis_start(safe_in, filenames, schema_infos)

            schema_context = "\n".join(schema_parts)
            prompt_for_code = redacted_in if (PHI_MODE and not ALLOW_EXTERNAL_PHI) else safe_in

            yield_update("```\n🧠 Generating aligned analysis script...\n```")
            analysis_script = _create_python_script(prompt_for_code, schema_context)

            # Log generated code
            log_code_generation(session_id, analysis_script)

            yield_update("```\n⚙️ Executing script in sandbox...\n```")
            try:
                raw_data_output = execute_in_sandbox(analysis_script, dataframes)
                log_code_execution(session_id, success=True, output_size=len(raw_data_output))
            except SandboxViolationError as e:
                log_code_execution(session_id, success=False, output_size=0, error=str(e))
                log_analysis_error(session_id, "sandbox_violation", str(e))
                safe_log("sandbox_violation", {"error": str(e)})
                return (
                    f"**Security Violation Detected**\n\n{e}\n\n"
                    f"The generated script attempted a forbidden operation. "
                    f"Please rephrase your request.\n\n"
                    f"Generated Script:\n```python\n{analysis_script}\n```"
                )
            except Exception as e:
                log_code_execution(session_id, success=False, output_size=0, error=str(e))
                log_analysis_error(session_id, "execution_error", str(e))
                return (
                    f"An error occurred executing the script: {e}\n\nGenerated Script:\n"
                    f"```python\n{analysis_script}\n```"
                )
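
            # Illustrative sketch only (never called): one way a sandbox can flag
            # forbidden operations before running anything, using a static AST
            # scan. The real enforcement lives in execute_in_sandbox(); the
            # blocklist below is a hypothetical example.
            def _sketch_static_guard(script: str) -> None:
                import ast as _ast
                banned = {"os", "subprocess", "socket"}
                for node in _ast.walk(_ast.parse(script)):
                    if isinstance(node, _ast.Import):
                        mods = [alias.name for alias in node.names]
                    elif isinstance(node, _ast.ImportFrom):
                        mods = [node.module or ""]
                    else:
                        continue
                    if any(m.split(".")[0] in banned for m in mods):
                        raise SandboxViolationError(f"Forbidden import: {mods}")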

            # JSON Validation - creates hard boundary between calculation and communication
            yield_update("```\n🔍 Validating JSON output...\n```")
            try:
                validated_data = validate_json_output(raw_data_output)
                safe_log("json_validation_passed", {"output_keys": list(validated_data.keys())})
            except JSONValidationError as e:
                log_analysis_error(session_id, "json_validation_error", str(e))
                safe_log("json_validation_failed", {"error": str(e)})
                return (
                    f"**JSON Validation Failed**\n\n{e}\n\n"
                    f"Generated Script:\n```python\n{analysis_script}\n```"
                )
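
            # Illustrative sketch only (never called): the "hard boundary" idea
            # behind validate_json_output(). The report writer may only see values
            # that parsed from the sandbox's JSON output, never free-form text.
            def _sketch_json_boundary(raw: str) -> dict:
                import json as _json
                parsed = _json.loads(raw)  # raises ValueError on non-JSON output
                if not isinstance(parsed, dict):
                    raise JSONValidationError("Top-level JSON must be an object.")
                return parsed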

            # PHI Protection: Enforce minimum cell sizes to prevent re-identification
            suppression_report = None
            if PHI_MODE:
                yield_update("```\n🔒 Enforcing minimum cell sizes...\n```")
                validated_data, suppression_report = enforce_minimum_cell_size(validated_data)
                if suppression_report and suppression_report.get("total_suppressions", 0) > 0:
                    safe_log("cell_size_enforcement", suppression_report)

            validated_json_str = format_validated_json_for_report(validated_data)

            yield_update("```\n✍️ Synthesizing final comprehensive report...\n```")
            writer_input = redacted_in if (PHI_MODE and not ALLOW_EXTERNAL_PHI) else safe_in
            final_report = _generate_final_report(writer_input, validated_json_str)

            # Log successful completion
            _end_time = _time.time()
            _duration_ms = (_end_time - _start_time) * 1000
            log_analysis_complete(
                session_id,
                validated_output_keys=list(validated_data.keys()),
                report_length=len(final_report),
                total_duration_ms=_duration_ms
            )

            # Append code traceability section
            # "Every finding traces back to specific lines of generated Python code"
            traceability_section = (
                f"\n\n---\n\n"
                f"<details>\n"
                f"<summary>📜 <strong>View Analysis Code</strong> (click to expand)</summary>\n\n"
                f"The findings in this report were generated by the following Python code, "
                f"executed in a sandboxed environment:\n\n"
                f"```python\n{analysis_script}\n```\n\n"
                f"**Session ID:** `{session_id}`\n\n"
                f"</details>"
            )

            return _sanitize_text(final_report) + traceability_section
        else:
            chat_input = redacted_in if (PHI_MODE and not ALLOW_EXTERNAL_PHI) else safe_in
            prompt = f"{GENERAL_CONVERSATION_PROMPT}\n\nUser: {chat_input}\nAssistant:"
            return _sanitize_text(cohere_chat(prompt) or "How can I help further?")

    except Exception as e:
        safe_log("app_error", {"err": str(e)})
        return "A critical error occurred. Please contact your administrator." if PHI_MODE else f"A critical error occurred: {e}"


PRIVACY_POLICY_TEXT = load_markdown_text("privacy_policy.md")
TERMS_OF_SERVICE_TEXT = load_markdown_text("terms_of_service.md")


# ---------------------- UI Assets ----------------------

SLEEK_CSS = """
:root, body, #root, .gradio-container { height: 100%; }
.gradio-container { padding: 0 !important; }
.block { padding: 0 !important; }

.header {
  padding: 20px 28px;
  background: linear-gradient(135deg, #0e1726, #1d2a44 60%, #243a5e);
  color: #fff;
  display: flex; align-items: center; justify-content: space-between;
  gap: 16px;
}
.header h1 { margin: 0; font-size: 22px; letter-spacing: 0.3px; font-weight: 600; }
.header .badge { font-size: 12px; opacity: 0.9; background:#ffffff22; padding:6px 10px; border-radius: 999px; }

.main {
  display: grid;
  grid-template-columns: 420px 1fr;
  gap: 16px;
  padding: 16px;
  height: calc(100vh - 72px);
  box-sizing: border-box;
}
.left, .right {
  background: #0b1020;
  color: #e9edf3;
  border-radius: 16px;
  border: 1px solid #1c2642;
}
.left { padding: 16px; display: flex; flex-direction: column; gap: 12px; }
.right { padding: 0; display: flex; flex-direction: column; }

.panel-title { font-size: 14px; font-weight: 600; color: #aeb8cc; margin-bottom: 6px; }
.helper { font-size: 12px; color: #97a3bb; margin-bottom: 8px; }

.actions {
  display: flex; gap: 8px; align-items: center; justify-content: stretch;
}
.actions .gr-button { flex: 1; }

.right .tabs { height: 100%; display: flex; flex-direction: column; }
.right .tabitem { flex: 1; display: flex; flex-direction: column; overflow: hidden; }
#chatbot_container { flex: 1; min-height: 600px; max-height: calc(100vh - 150px); overflow-y: auto; }
#chatbot_container > * { min-height: 600px; }

.hr { height: 1px; background: #16203b; margin: 10px 0; }
.voice-hint { font-size: 12px; color:#9fb0cc; margin-top: 4px; }
"""

VOICE_STT_HTML = """
<script>
let __rs_rec = null;
function rs_toggle_stt(elemId){
  const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
  if (!SpeechRecognition){
    alert("This browser does not support Speech Recognition. Try Chrome or Edge.");
    return;
  }
  if (__rs_rec){ __rs_rec.stop(); __rs_rec = null; return; }
  __rs_rec = new SpeechRecognition();
  __rs_rec.lang = "en-US";
  __rs_rec.interimResults = true;
  __rs_rec.continuous = true;

  const box = document.querySelector(`#${elemId} textarea`);
  if (!box){ alert("Prompt box not found."); return; }
  let base = box.value || "";

  __rs_rec.onresult = (ev) => {
    // Each result holds a list of alternatives; take the top one. Fold finals
    // into the base text so they survive later events; re-render interims.
    let interim = "";
    for (let i = ev.resultIndex; i < ev.results.length; i++){
      const alt = ev.results[i][0].transcript;
      if (ev.results[i].isFinal){
        base = (base + " " + alt).trim();
      } else {
        interim += alt;
      }
    }
    box.value = (base + " " + interim).trim();
    box.dispatchEvent(new Event("input", { bubbles: true }));
  };
  __rs_rec.onend = () => { __rs_rec = null; };
  __rs_rec.start();
}
</script>
"""


# ---------------------- Gradio UI ----------------------

with gr.Blocks(theme=gr.themes.Soft(), css=SLEEK_CSS, fill_width=True) as demo:
    assessment_history = gr.State([])

    with gr.Row(elem_classes=["header"]):
        gr.Markdown("<h1>Clarity Ops Augmented Decision Support</h1>")
        pill = "PHI Mode ON · history off" if (PHI_MODE and not PERSIST_HISTORY) else \
               "PHI Mode ON" if PHI_MODE else "PHI Mode OFF"
        gr.Markdown(f"<span class='badge'>{pill}</span>")

    with gr.Row(elem_classes=["main"]):
        with gr.Column(elem_classes=["left"]):
            gr.Markdown("<div class='panel-title'>New Assessment</div>")
            gr.Markdown("<div class='helper'>Upload CSVs for analysis, or enter a prompt. Voice works in modern browsers.</div>")
            files_input = gr.Files(
                label="Upload Data Files (.csv)",
                file_count="multiple",
                type="filepath",
                file_types=[".csv"],
            )
            prompt_input = gr.Textbox(
                label="Prompt",
                placeholder="Paste your scenario or question here...",
                lines=12,
                elem_id="prompt_box",
                autofocus=True,
            )

            with gr.Row(elem_classes=["actions"]):
                send_btn = gr.Button("▶️ Run Analysis", variant="primary")
                clear_btn = gr.Button("🧹 Clear")
                voice_btn = gr.Button("🎙️ Voice")

            gr.Markdown("<div class='voice-hint'>Click Voice to start/stop dictation into the prompt box.</div>")
            ping_btn = gr.Button("🔌 Ping Cohere")
            ping_out = gr.Markdown()

            gr.Markdown("<div class='hr'></div>")
            if PHI_MODE:
                gr.Markdown(
                    "⚠️ **PHI Mode:** History persistence is disabled by default. Avoid unnecessary identifiers."
                )

            with gr.Accordion("Privacy & Terms", open=False):
                gr.Markdown(PRIVACY_POLICY_TEXT)
                gr.Markdown("<div class='hr'></div>")
                gr.Markdown(TERMS_OF_SERVICE_TEXT)

        with gr.Column(elem_classes=["right"]):
            with gr.Tabs(elem_classes=["tabs"]):
                with gr.TabItem("Current Assessment", id=0, elem_classes=["tabitem"]):
                    with gr.Column(elem_id="chatbot_container"):
                        chat_history_output = gr.Chatbot(label="Analysis Output", type="messages", container=False, autoscroll=True, height=700)
                with gr.TabItem("Assessment History", id=1, elem_classes=["tabitem"]):
                    gr.Markdown("### Review Past Assessments")
                    history_dropdown = gr.Dropdown(label="Select an assessment to review", choices=[])
                    history_display = gr.Markdown(label="Selected Assessment Details")

    gr.HTML(VOICE_STT_HTML)

    def run_analysis_wrapper(prompt, files, chat_history_list, history_state_list):
        if not prompt:
            gr.Warning("Please enter a prompt.")
            yield chat_history_list, history_state_list, gr.update()
            return

        chat_with_user_msg = _append_msg(chat_history_list, "user", prompt)

        def dummy_update(message: str):
            # handle() reports progress through this callback, but Gradio only
            # re-renders on generator yields, so intermediate updates are dropped.
            pass

        thinking_message = _append_msg(
            chat_with_user_msg,
            "assistant",
            "```\n🧠 Generating and executing analysis... Please wait.\n```",
        )
        yield thinking_message, history_state_list, gr.update()

        ai_response_text = handle(prompt, files, dummy_update)

        final_chat = _append_msg(chat_with_user_msg, "assistant", ai_response_text)
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        file_names: List[str] = []
        if files:
            file_names = [
                os.path.basename(f.name if hasattr(f, "name") else f) for f in files
            ]

        new_entry = {
            "id": timestamp,
            "prompt": prompt,
            "files": file_names,
            "response": ai_response_text,
            "chat_history": final_chat,
        }

        # Persist only when history is enabled and, in PHI mode, only when a
        # retention TTL has been configured.
        if PERSIST_HISTORY and (not PHI_MODE or HISTORY_TTL_DAYS > 0):
            updated_history: List[Dict[str, Any]] = (history_state_list or []) + [new_entry]
        else:
            updated_history = history_state_list or []

        history_labels = [
            f"{item['id']} - {item['prompt'][:40]}" + ("..." if len(item["prompt"]) > 40 else "")
            for item in updated_history
        ]

        yield final_chat, updated_history, gr.update(choices=history_labels)

    def view_history(selection: str, history_state_list: List[Dict[str, Any]]) -> str:
        if not selection or not history_state_list:
            return ""
        try:
            selected_id = selection.split(" - ", 1)[0]
        except Exception:
            selected_id = selection

        selected_assessment = next(
            (item for item in history_state_list if item.get("id") == selected_id), None
        )
        if not selected_assessment:
            return "Could not find the selected assessment."

        file_list = selected_assessment.get("files", [])
        file_list_md = "\n- ".join(file_list) if file_list else "*(no files uploaded)*"

        chat_entries = selected_assessment.get("chat_history", [])
        chat_md_lines = []
        for msg in chat_entries:
            role = msg.get("role", "").capitalize()
            content = msg.get("content", "")
            chat_md_lines.append(f"**{role}:** {content}")
        chat_md = "\n\n".join(chat_md_lines)

        return f"""### Assessment from: {selected_assessment['id']}
**Files Used:**
- {file_list_md}
---
**Original Prompt:**
> {selected_assessment['prompt']}
---
**AI Generated Response:**
{selected_assessment['response']}
---
**Chat Transcript:**
{chat_md}
"""

    send_btn.click(
        run_analysis_wrapper,
        inputs=[prompt_input, files_input, chat_history_output, assessment_history],
        outputs=[chat_history_output, assessment_history, history_dropdown],
    )
    history_dropdown.change(
        view_history,
        inputs=[history_dropdown, assessment_history],
        outputs=[history_display],
    )
    clear_btn.click(
        lambda: (None, None, []),
        outputs=[prompt_input, files_input, chat_history_output],
    )
    ping_btn.click(ping_cohere, outputs=[ping_out])
    voice_btn.click(None, [], [], js="rs_toggle_stt('prompt_box')")


if __name__ == "__main__":
    if not os.getenv("COHERE_API_KEY"):
        print("🔴 COHERE_API_KEY environment variable not set. Application may not function correctly.")
    demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860")))