# -*- coding: utf-8 -*-
"""
OCR Model Module
Handles loading and inference of the Hurricane OCR / Typhoon OCR model
Supports: GPU-only, CPU-only, and Hybrid (GPU+CPU) modes
Supports: Base model or Fine-tuned LoRA model
"""

import sys
import io
import os

# Fix Windows console encoding for Thai characters and emojis
if sys.platform == 'win32':
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', errors='replace')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8', errors='replace')

import torch
import importlib

# Import transformer symbols safely — some installs may not provide newer classes
_transformers = importlib.import_module("transformers")
AutoProcessor = getattr(_transformers, "AutoProcessor")
AutoModelForImageTextToText = getattr(_transformers, "AutoModelForImageTextToText")
AutoModelForVision2Seq = getattr(_transformers, "AutoModelForVision2Seq", None)
BitsAndBytesConfig = getattr(_transformers, "BitsAndBytesConfig", None)
AutoTokenizer = getattr(_transformers, "AutoTokenizer")
from PIL import Image
import numpy as np
from typing import Optional, Dict, Any, Tuple, List, Union
from pathlib import Path
import time

# ============================================================
# CONFIGURATION - Change these settings as needed
# ============================================================
# Model Options:
#   - Set HURRICANE_MODEL_PATH to use fine-tuned Hurricane OCR model
#   - Default: Rattatammanoon/hurricane-ocr-tlpr-v1-LoRA (LoRA adapter from HuggingFace)
#   - Set to None to use base Typhoon OCR model
# Priority: Using Hurricane OCR LoRA from HuggingFace
HURRICANE_MODEL_PATH = "Rattatammanoon/hurricane-ocr-tlpr-v1-LoRA"  # HuggingFace model

# Object Detection Model (YOLOv8-based)
# Set to HuggingFace model or local path for HurricaneOD_beta
HURRICANE_OD_MODEL_PATH = "Rattatammanoon/hurricane-od-thai-plate-detector"  # HuggingFace model

# Device Mode Options:
#   "auto"   - Automatically use GPU if available, fallback to CPU
#   "gpu"    - Force GPU only (will error if no GPU)
#   "cpu"    - Force CPU only
#   "hybrid" - Use GPU + CPU together (offload to CPU when VRAM is full)
DEVICE_MODE = "hybrid"

# Memory settings
MAX_GPU_MEMORY = "3GB"  # GPU memory cap (lower = more CPU offload: less VRAM used, more system RAM)

# Offline Mode Settings
# Set to True to use only local files (no internet required)
# Base model must be downloaded and cached first
USE_OFFLINE_MODE = False  # Set to True for offline usage
LOCAL_BASE_MODEL_PATH = None  # Optional: Path to local base model (e.g., "./models/thai-trocr")
# ============================================================
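
# Example (illustrative): the constants above are module-wide defaults. OCRModel,
# defined below, accepts the same settings per instance, so they can be overridden
# without editing this file:
#
#   ocr = OCRModel(device_mode="cpu", max_gpu_memory="2GB", use_detection=False)
#   ocr.load()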


def get_device_info() -> dict:
    """Get information about available devices"""
    info = {
        "cuda_available": torch.cuda.is_available(),
        "cuda_device_count": torch.cuda.device_count() if torch.cuda.is_available() else 0,
        "cuda_device_name": torch.cuda.get_device_name(0) if torch.cuda.is_available() else None,
        "cuda_memory_total": None,
        "cuda_memory_free": None,
        "cuda_memory_total_gb": 0,
        "cpu_count": os.cpu_count(),
    }
    
    if info["cuda_available"]:
        try:
            total = torch.cuda.get_device_properties(0).total_memory / (1024**3)
            allocated = torch.cuda.memory_allocated(0) / (1024**3)
            free = total - allocated
            info["cuda_memory_total"] = f"{total:.1f} GB"
            info["cuda_memory_free"] = f"{free:.1f} GB"
            info["cuda_memory_total_gb"] = total
        except Exception:
            pass
    
    return info
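
# Minimal usage sketch for get_device_info(); all keys shown are populated above
# (values are illustrative):
#
#   info = get_device_info()
#   if info["cuda_available"]:
#       print(info["cuda_device_name"], info["cuda_memory_free"])  # e.g. "7.2 GB"
#   else:
#       print(f"CPU only ({info['cpu_count']} cores)")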


class PlateDetector:
    """
    YOLOv8n-based License Plate Detector
    Detects license plates in images before OCR processing
    
    Supports:
    - Pretrained YOLOv8 models (yolov8n.pt, etc.)
    - Custom trained models (HurricaneOD_beta.pt)
    """
    
    def __init__(
        self, 
        model_size: str = "n", 
        conf_threshold: float = 0.25, 
        iou_threshold: float = 0.45,
        model_path: Optional[str] = None
    ):
        """
        Initialize YOLOv8 detector
        
        Args:
            model_size: YOLOv8 model size - "n" (nano), "s" (small), "m" (medium), "l" (large), "x" (xlarge)
            conf_threshold: Confidence threshold for detection (0.0-1.0)
            iou_threshold: IoU threshold for NMS (0.0-1.0)
            model_path: Path to custom trained model (e.g., HurricaneOD_beta.pt). If None, uses pretrained model.
        """
        self.model_size = model_size
        self.conf_threshold = conf_threshold
        self.iou_threshold = iou_threshold
        # Keep model_path as string to support HuggingFace paths (e.g., "username/model-name")
        # Will convert to Path only for local file paths in load() method
        self.model_path = model_path
        self.model = None
        self._is_loaded = False
        
    def load(self, device: str = "auto"):
        """Load YOLOv8 model"""
        if self._is_loaded:
            return
        
        try:
            from ultralytics import YOLO
            
            # Determine device
            if device == "auto":
                device = "cuda" if torch.cuda.is_available() else "cpu"
            
            # Check for custom trained model (HurricaneOD_beta)
            # Priority: 1) Provided model_path, 2) HuggingFace, 3) hurricane_ocr_model, 4) training folder, 5) pretrained
            model_path_to_load = None
            model_source = None
            is_huggingface = False
            
            # Check if model_path is a HuggingFace path FIRST (format: username/model-name)
            if self.model_path and "/" in str(self.model_path) and "\\" not in str(self.model_path):
                parts = str(self.model_path).split("/")
                if len(parts) == 2 and not str(self.model_path).startswith("."):
                    # This looks like a HuggingFace path
                    model_path_to_load = str(self.model_path)
                    model_source = "HuggingFace Hub"
                    is_huggingface = True
            
            # Check if model_path is a local file path
            if not model_path_to_load and self.model_path:
                local_path = Path(self.model_path)
                if local_path.exists():
                    model_path_to_load = str(local_path)
                    model_source = "provided path"
            
            if not model_path_to_load:
                # Try to find HurricaneOD_beta model locally
                # Priority 1: hurricane_ocr_model (recommended location)
                hurricaneod_path = Path("hurricane_ocr_model/HurricaneOD_beta/HurricaneOD_beta.pt")
                # Priority 2: training folder weights
                training_best_path = Path("HurricaneOD/HurricaneOD_beta/weights/best.pt")
                training_last_path = Path("HurricaneOD/HurricaneOD_beta/weights/last.pt")
                
                if hurricaneod_path.exists():
                    model_path_to_load = str(hurricaneod_path)
                    model_source = "hurricane_ocr_model/HurricaneOD_beta"
                elif training_best_path.exists():
                    model_path_to_load = str(training_best_path)
                    model_source = "HurricaneOD/HurricaneOD_beta/weights (best.pt)"
                elif training_last_path.exists():
                    model_path_to_load = str(training_last_path)
                    model_source = "HurricaneOD/HurricaneOD_beta/weights (last.pt)"
            
            if model_path_to_load:
                if is_huggingface:
                    print(f"🔍 Loading HurricaneOD_beta model from: HuggingFace Hub")
                    print(f"   📦 Model: {model_path_to_load}")
                    print(f"   💡 First load: downloads from HuggingFace (~6-10s)")
                    print(f"   💡 Next loads: uses cached model (~1-2s)")
                    print(f"   📍 Cache: ~/.cache/huggingface/hub/")
                else:
                    print(f"🔍 Loading HurricaneOD_beta model from: Local File")
                    print(f"   📂 Source: {model_source}")
                    # Convert to absolute path for display (local files only)
                    abs_path = Path(model_path_to_load).resolve()
                    print(f"   📍 Path: {abs_path}")
                print(f"   🖥️  Device: {device}")
                det_start = time.time()
                try:
                    # For HuggingFace models, download via huggingface_hub to track downloads
                    if is_huggingface:
                        try:
                            from huggingface_hub import hf_hub_download, list_repo_files
                            print(f"   ⏳ Checking available files in HuggingFace repo...")
                            
                            # List all files in the repository
                            try:
                                repo_files = list_repo_files(repo_id=model_path_to_load, repo_type="model")
                                pt_files = [f for f in repo_files if f.endswith('.pt')]
                                print(f"   📋 Found .pt files: {pt_files}")
                            except Exception:
                                pt_files = []
                            
                            # Try multiple possible filenames (most likely first)
                            possible_filenames = [
                                "HurricaneOD_beta.pt",  # Primary filename for HurricaneOD
                                "best.pt",
                                "model.pt",
                                "weights/best.pt"
                            ]
                            
                            # If we found files, use the first .pt file
                            if pt_files:
                                possible_filenames = pt_files + possible_filenames
                            
                            local_model_path = None
                            for filename in possible_filenames:
                                try:
                                    print(f"   ⏳ Trying to download: {filename}")
                                    local_model_path = hf_hub_download(
                                        repo_id=model_path_to_load,
                                        filename=filename,
                                        repo_type="model"
                                    )
                                    print(f"   ✅ Downloaded: {filename}")
                                    print(f"   📥 Saved to: {local_model_path}")
                                    break
                                except Exception as e:
                                    print(f"   ⚠️  {filename} not found: {e}")
                                    continue
                            
                            if local_model_path:
                                model_path_to_load = local_model_path
                            else:
                                raise FileNotFoundError(
                                    f"No YOLO model file (.pt) found in {model_path_to_load}\n"
                                    f"   Please upload one of these files to your HuggingFace repo:\n"
                                    f"   - best.pt (recommended)\n"
                                    f"   - HurricaneOD_beta.pt\n"
                                    f"   - model.pt\n"
                                    f"   Available files: {repo_files if 'repo_files' in locals() else 'unknown'}"
                                )
                                
                        except ImportError:
                            print(f"   ⚠️  huggingface_hub not installed, downloads won't be tracked")
                            print(f"   💡 Install with: pip install huggingface_hub")
                            raise
                        except Exception as e:
                            print(f"   ❌ Could not download from HuggingFace: {e}")
                            print(f"   💡 Make sure you uploaded a .pt file to the repository")
                            print(f"   💡 Repository: https://huggingface.co/{model_path_to_load}")
                            raise
                    
                    self.model = YOLO(model_path_to_load)
                    if self.model is None:
                        raise RuntimeError(f"YOLO returned None when loading {model_path_to_load}")
                except Exception as load_error:
                    raise RuntimeError(f"Failed to load YOLOv8 model from {model_path_to_load}: {load_error}")
                det_elapsed = time.time() - det_start
                print(f"   ✅ HurricaneOD_beta model loaded successfully! (took {det_elapsed:.2f} seconds)")
            else:
                # Fallback to pretrained YOLOv8 model (not recommended)
                model_name = f"yolov8{self.model_size}.pt"
                print(f"⚠️  HurricaneOD_beta model not found, using pretrained YOLOv8{self.model_size}")
                print(f"   Device: {device}")
                print(f"   ⏳ Downloading pretrained model (first time only, ~6MB)...")
                print(f"   Note: For better results, train and use HurricaneOD_beta model")
                det_start = time.time()
                try:
                    self.model = YOLO(model_name)
                    if self.model is None:
                        raise RuntimeError(f"YOLO returned None when loading {model_name}")
                except Exception as load_error:
                    raise RuntimeError(f"Failed to load YOLOv8 model {model_name}: {load_error}")
                det_elapsed = time.time() - det_start
                print(f"   ✅ YOLOv8 detector loaded (pretrained) (took {det_elapsed:.2f} seconds)")
            
            # Verify model is loaded before marking as loaded
            if self.model is None:
                raise RuntimeError("YOLOv8 model is None after loading. Model loading failed.")
            
            self._is_loaded = True
            
        except ImportError:
            raise ImportError(
                "ultralytics not installed. Install with: pip install ultralytics\n"
                "Note: YOLOv8 will use pretrained COCO model. For better results, "
                "fine-tune on Thai license plate dataset."
            )
        except Exception as e:
            raise RuntimeError(f"Failed to load YOLOv8 model: {e}")
    
    def detect(self, image: Image.Image) -> List[Dict[str, Any]]:
        """
        Detect license plates in image
        
        Args:
            image: PIL Image
            
        Returns:
            List of detections with bounding boxes, confidence scores
            Format: [{"bbox": [x1, y1, x2, y2], "confidence": float, "class": int}, ...]
        """
        if not self._is_loaded:
            self.load()
        
        # Check if model is loaded
        if self.model is None:
            raise RuntimeError(
                "Plate detector model is not loaded. "
                "Please ensure YOLOv8 model was loaded successfully."
            )
        
        # Convert PIL to numpy array
        img_array = np.array(image)
        
        # Run detection
        results = self.model.predict(
            img_array,
            conf=self.conf_threshold,
            iou=self.iou_threshold,
            verbose=False
        )
        
        detections = []
        if results and len(results) > 0:
            result = results[0]
            
            # Extract boxes, confidences, classes
            if result.boxes is not None:
                boxes = result.boxes.xyxy.cpu().numpy()  # [x1, y1, x2, y2]
                confidences = result.boxes.conf.cpu().numpy()
                classes = result.boxes.cls.cpu().numpy().astype(int)
                
                for i in range(len(boxes)):
                    detections.append({
                        "bbox": boxes[i].tolist(),
                        "confidence": float(confidences[i]),
                        "class": int(classes[i])
                    })
        
        return detections
    
    def crop_plate(self, image: Image.Image, bbox: List[float], padding: int = 10) -> Image.Image:
        """
        Crop license plate region from image
        
        Args:
            image: PIL Image
            bbox: Bounding box [x1, y1, x2, y2]
            padding: Padding pixels around the bbox
            
        Returns:
            Cropped PIL Image
        """
        x1, y1, x2, y2 = bbox
        
        # Add padding
        width, height = image.size
        x1 = max(0, int(x1) - padding)
        y1 = max(0, int(y1) - padding)
        x2 = min(width, int(x2) + padding)
        y2 = min(height, int(y2) + padding)
        
        # Crop
        cropped = image.crop((x1, y1, x2, y2))
        return cropped
    
    def _get_coco_class_name(self, class_id: int) -> str:
        """Get COCO class name from class ID"""
        coco_classes = [
            'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
            'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench',
            'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',
            'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
            'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
            'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
            'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
            'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
            'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse',
            'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
            'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
            'toothbrush'
        ]
        if 0 <= class_id < len(coco_classes):
            return coco_classes[class_id]
        return f"class_{class_id}"
    
    def _select_best_plate_region(self, image: Image.Image, detections: List[Dict]) -> Optional[Dict]:
        """
        Select the best detection for license plate region
        
        Strategy:
        1. Prefer detections in bottom-center region (where plates usually are)
        2. Prefer vehicle classes (car, truck, bus, motorcycle)
        3. Use aspect ratio filtering (plates are usually wider than tall)
        4. Prefer detections with higher confidence
        """
        if not detections:
            return None
        
        width, height = image.size
        center_x = width / 2
        bottom_y = height * 0.7  # Bottom 30% of image
        
        # Vehicle class IDs in COCO
        vehicle_classes = {2: 'car', 3: 'motorcycle', 5: 'bus', 7: 'truck'}
        
        scored_detections = []
        for det in detections:
            bbox = det["bbox"]
            x1, y1, x2, y2 = bbox
            center_bbox_x = (x1 + x2) / 2
            center_bbox_y = (y1 + y2) / 2
            
            # Calculate score
            score = det["confidence"]
            
            # Bonus for vehicle classes
            class_id = det.get("class", -1)
            if class_id in vehicle_classes:
                score *= 1.5
            
            # Bonus for bottom-center region
            distance_from_center = abs(center_bbox_x - center_x) / width
            distance_from_bottom = abs(center_bbox_y - bottom_y) / height
            
            if distance_from_center < 0.3:  # Within 30% of center
                score *= 1.3
            if center_bbox_y > height * 0.5:  # In bottom half
                score *= 1.2
            
            # Check aspect ratio (plates are usually wider)
            bbox_width = x2 - x1
            bbox_height = y2 - y1
            aspect_ratio = bbox_width / bbox_height if bbox_height > 0 else 1
            
            if 1.5 < aspect_ratio < 5.0:  # Reasonable plate aspect ratio
                score *= 1.4
            
            scored_detections.append({
                **det,
                "score": score,
                "class_name": self._get_coco_class_name(class_id)
            })
        
        # Sort by score
        scored_detections.sort(key=lambda x: x["score"], reverse=True)
        
        # Return best detection
        if scored_detections:
            return scored_detections[0]
        return None
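
    # Worked example of the scoring above (illustrative numbers): a car detection
    # (class 2) at confidence 0.8, horizontally centered, in the bottom half, with
    # a 3:1 bounding box, scores:
    #   0.8 * 1.5 (vehicle) * 1.3 (centered) * 1.2 (bottom half) * 1.4 (aspect) ≈ 2.62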
    
    def _fallback_region_detection(self, image: Image.Image) -> Optional[Image.Image]:
        """
        Fallback method: Use bottom-center region if YOLOv8 doesn't detect properly
        
        This assumes license plate is in the bottom-center region of the image
        """
        width, height = image.size
        
        # Crop bottom-center region (typical plate location)
        # Use bottom 30% and center 60% of image
        x1 = int(width * 0.2)
        y1 = int(height * 0.7)
        x2 = int(width * 0.8)
        y2 = int(height * 0.95)
        
        cropped = image.crop((x1, y1, x2, y2))
        return cropped
    
    def _contour_based_detection(self, image: Image.Image) -> Optional[Image.Image]:
        """
        Alternative detection using contour detection
        
        Looks for rectangular regions that might be license plates
        """
        try:
            import cv2
            
            # Convert PIL to OpenCV format
            img_array = np.array(image.convert("RGB"))
            img_cv = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
            gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
            
            # Apply threshold
            _, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            
            # Find contours
            contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            
            width, height = image.size
            best_contour = None
            best_score = 0
            
            for contour in contours:
                # Get bounding rect
                x, y, w, h = cv2.boundingRect(contour)
                
                # Filter by size and aspect ratio (typical plate dimensions)
                area = w * h
                aspect_ratio = w / h if h > 0 else 0
                
                # Plate-like characteristics
                if (area > width * height * 0.01 and  # At least 1% of image
                    area < width * height * 0.3 and   # At most 30% of image
                    1.5 < aspect_ratio < 5.0 and      # Reasonable aspect ratio
                    y > height * 0.5):                 # In bottom half
                    
                    # Score based on position and size
                    score = area * (1.0 - abs(aspect_ratio - 2.5) / 2.5)
                    if y > height * 0.6:  # Bonus for bottom region
                        score *= 1.5
                    
                    if score > best_score:
                        best_score = score
                        best_contour = (x, y, w, h)
            
            if best_contour:
                x, y, w, h = best_contour
                # Add padding
                padding = 10
                x1 = max(0, x - padding)
                y1 = max(0, y - padding)
                x2 = min(width, x + w + padding)
                y2 = min(height, y + h + padding)
                
                cropped = image.crop((x1, y1, x2, y2))
                return cropped
            
        except ImportError:
            pass  # OpenCV not available
        except Exception:
            pass  # Contour detection failed
        
        return None
    
    def detect_and_crop(self, image: Image.Image, return_all: bool = False) -> Tuple[Union[Optional[Image.Image], List[Image.Image]], List[Dict]]:
        """
        Detect and crop the best license plate from image
        
        Uses smart selection strategy:
        1. Try YOLOv8 detection with smart region selection
        2. Try contour-based detection if YOLOv8 finds nothing
        3. Fall back to the bottom-center region as a last resort
        
        Args:
            image: PIL Image
            return_all: If True, crop and return all detections; otherwise only the best one
            
        Returns:
            Tuple of (cropped_image, detections). With return_all=True the first
            element is a list of cropped images (empty list if nothing detected);
            otherwise a single image, or (None, []) if every method fails.
        """
        detections = self.detect(image)
        
        if return_all:
            if not detections:
                return [], []
            # Return all cropped plates
            cropped_images = []
            for det in detections:
                cropped = self.crop_plate(image, det["bbox"])
                cropped_images.append(cropped)
            return cropped_images, detections
        
        # Select best detection using smart strategy
        best_det = self._select_best_plate_region(image, detections)
        
        if best_det:
            # Use selected detection
            cropped = self.crop_plate(image, best_det["bbox"], padding=15)
            return cropped, [best_det]
        else:
            # Try contour-based detection first
            print("   ⚠️ No YOLOv8 detection found, trying contour-based detection...")
            cropped = self._contour_based_detection(image)
            
            if cropped:
                # Create detection info for contour method
                width, height = image.size
                contour_det = {
                    "bbox": [0, 0, width, height],  # Approximate
                    "confidence": 0.6,
                    "class": -1,
                    "class_name": "contour_detection",
                    "score": 0.6,
                    "method": "contour"
                }
                return cropped, [contour_det]
            
            # Final fallback: use bottom-center region
            print("   ⚠️ Contour detection failed, using fallback region detection")
            cropped = self._fallback_region_detection(image)
            if cropped:
                # Create dummy detection info for fallback
                width, height = image.size
                fallback_det = {
                    "bbox": [width * 0.2, height * 0.7, width * 0.8, height * 0.95],
                    "confidence": 0.5,
                    "class": -1,
                    "class_name": "fallback_region",
                    "score": 0.5,
                    "method": "region"
                }
                return cropped, [fallback_det]
        
        return None, []
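
# Standalone usage sketch for PlateDetector (paths and thresholds are illustrative;
# assumes ultralytics is installed and "car.jpg" exists):
#
#   detector = PlateDetector(conf_threshold=0.3)
#   detector.load(device="auto")
#   plate_img, dets = detector.detect_and_crop(Image.open("car.jpg"))
#   if plate_img is not None:
#       plate_img.save("plate_crop.jpg")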


class OCRModel:
    """
    OCR Model wrapper for Hurricane OCR / Typhoon OCR
    Supports multiple device modes: auto, gpu, cpu, hybrid
    Supports: Base model or Fine-tuned LoRA model
    """
    
    BASE_MODEL_NAME = "scb10x/typhoon-ocr1.5-2b"
    MAX_IMAGE_SIZE = 1024  # Reduced from 1800 for lower RAM usage
    
    def __init__(self, device_mode: str = None, use_8bit: bool = True, max_gpu_memory: str = None,
                 hurricane_model_path: str = None, use_detection: bool = True):
        """
        Initialize the OCR model
        
        Args:
            device_mode: "auto", "gpu", "cpu", or "hybrid" (default: use global DEVICE_MODE)
            use_8bit: Whether to use 8-bit quantization for memory efficiency
            max_gpu_memory: Maximum GPU memory to use (e.g., "6GB")
            hurricane_model_path: Path to fine-tuned Hurricane OCR model (LoRA)
            use_detection: Enable YOLOv8 plate detection before OCR (default: True)
        """
        self.device_mode = device_mode or DEVICE_MODE
        self.use_8bit = use_8bit
        self.max_gpu_memory = max_gpu_memory or MAX_GPU_MEMORY
        self.hurricane_model_path = hurricane_model_path or HURRICANE_MODEL_PATH
        self.use_detection = use_detection
        self.model = None
        self.processor = None
        self.tokenizer = None
        self.plate_detector = None
        self._is_loaded = False
        self._device = None
        self._device_info = get_device_info()
        self._mode_used = None
        self._using_hurricane = False
    
    def load(self) -> None:
        """Load the model and processor"""
        if self._is_loaded:
            print("✅ Model already loaded (using cached instance).")
            return
        
        start_time = time.time()
        
        # Check if Hurricane OCR model exists and what type it is
        # Support both local paths and HuggingFace Hub paths (username/model-name)
        use_hurricane = False
        is_huggingface_path = False
        
        if self.hurricane_model_path:
            # Check if it's a HuggingFace Hub path (format: username/model-name)
            # HuggingFace paths have exactly one "/" and no backslashes or dots at start
            if "/" in self.hurricane_model_path and "\\" not in self.hurricane_model_path:
                parts = self.hurricane_model_path.split("/")
                if len(parts) == 2 and not self.hurricane_model_path.startswith("."):
                    # This looks like a HuggingFace path (e.g., "Rattatammanoon/hurricane-ocr-tlpr-v1-LoRA")
                    is_huggingface_path = True
                    use_hurricane = True
                    print(f"   📦 Detected HuggingFace model path: {self.hurricane_model_path}")
                else:
                    # Local path with slashes
                    use_hurricane = os.path.exists(self.hurricane_model_path)
            else:
                # Local path
                use_hurricane = os.path.exists(self.hurricane_model_path)
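
        # Illustrative inputs for the path heuristic above (hypothetical values):
        #   "Rattatammanoon/hurricane-ocr-tlpr-v1-LoRA"  -> HuggingFace Hub repo id
        #   "./finetuned/hurricane"                      -> local path (leading "."), checked on disk
        #   "models\hurricane"                           -> local path (backslash), checked on disk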
        
        # Check if it's LoRA adapter or merged model
        is_lora_adapter = False
        is_merged_model = False
        merged_model_path = None
        adapter_config_path = None  # Initialize to avoid UnboundLocalError
        is_trocr_base = False  # Track if base model is TrOCR
        if use_hurricane:
            # For HuggingFace paths, we need to download and check the files
            # For local paths, we can check directly
            if is_huggingface_path:
                # HuggingFace model - assume it's a LoRA adapter
                # (HuggingFace will auto-detect type when loading)
                is_lora_adapter = True
                print(f"   📦 HuggingFace model detected - will load as LoRA adapter")
                print(f"   💡 Model will be downloaded from HuggingFace Hub on first load")
            else:
                # Local path - check files to determine type
                # Check for merged model first (has config.json but no adapter_config.json)
                config_json_path = os.path.join(self.hurricane_model_path, "config.json")
                adapter_config_path = os.path.join(self.hurricane_model_path, "adapter_config.json")
            
                if os.path.exists(config_json_path) and not os.path.exists(adapter_config_path):
                    # This might be a merged model (full model, not just adapter)
                    is_merged_model = True
                    merged_model_path = self.hurricane_model_path
                    print(f"   📋 Detected merged model (full model, no base model needed)")
                    print(f"   📋 Model path: {merged_model_path}")
                elif os.path.exists(adapter_config_path):
                    # Check for pre-merged model in merged/ subdirectory
                    merged_subdir = os.path.join(self.hurricane_model_path, "merged")
                    if os.path.exists(merged_subdir) and os.path.exists(os.path.join(merged_subdir, "config.json")):
                        is_merged_model = True
                        merged_model_path = merged_subdir
                        print(f"   📋 Found pre-merged model in merged/ subdirectory")
                        print(f"   📋 Will use merged model (no base model loading needed)")
                    else:
                        is_lora_adapter = True
                        try:
                            import json
                            with open(adapter_config_path, 'r', encoding='utf-8') as f:
                                adapter_config = json.load(f)
                                base_model = adapter_config.get('base_model_name_or_path', '')
                                is_trocr_base = 'trocr' in base_model.lower()
                                model_type_str = "Thai TrOCR" if is_trocr_base else "Vision-Language (Qwen3VL/Typhoon)"
                                print(f"   📋 Detected LoRA adapter model")
                                print(f"   📋 Base model: {base_model}")
                                print(f"   📋 Model type: {model_type_str}")
                                print(f"   💡 Note: Base model will be loaded first, then LoRA adapter")
                                print(f"   💡 Base model is cached by HuggingFace (~/.cache/huggingface/)")
                                print(f"   💡 First load: downloads base model (~2-5GB, 30-60s)")
                                print(f"   💡 Next loads: uses cached base model (~5-10s)")
                        except Exception as e:
                            print(f"   ⚠️  Warning: Could not read adapter config: {e}")
        
        # Use Typhoon OCR (Transformers) with or without LoRA
        # Check device availability
        print("=" * 60)
        if use_hurricane and is_merged_model:
            print("🌀 HURRICANE OCR - Merged Model (Full Model)")
            print(f"   Model Path: {merged_model_path}")
            print(f"   Type: Merged Model (LoRA merged into base model)")
            print(f"   ✅ No base model loading needed!")
        elif use_hurricane and is_lora_adapter:
            print("🌀 HURRICANE OCR - Fine-tuned Model (Typhoon OCR + LoRA)")
            print(f"   Model Path: {self.hurricane_model_path}")
            print(f"   Type: LoRA Adapter (Parameter-Efficient Fine-Tuning)")
            print(f"   ⚠️  Will load base model first, then LoRA adapter")
            print(f"   💡 Base model is cached by HuggingFace (~/.cache/huggingface/)")
            print(f"   💡 First load: downloads base model (~2-5GB, 30-60s)")
            print(f"   💡 Next loads: uses cached base model (~5-10s)")
        elif use_hurricane:
            print("🌀 HURRICANE OCR - Fine-tuned Model (Typhoon)")
            print(f"   Model Path: {self.hurricane_model_path}")
        else:
            print("🌊 TYPHOON OCR - Base Model")
            if self.hurricane_model_path:
                print(f"   ⚠️  Hurricane model not found: {self.hurricane_model_path}")
                print(f"   ⚠️  Falling back to base Typhoon OCR model")
        print("=" * 60)
        print("🖥️  Device Information:")
        print(f"   CUDA Available: {self._device_info['cuda_available']}")
        print(f"   CPU Cores: {self._device_info['cpu_count']}")
        if self._device_info['cuda_available']:
            print(f"   GPU: {self._device_info['cuda_device_name']}")
            print(f"   VRAM Total: {self._device_info['cuda_memory_total']}")
            print(f"   VRAM Free: {self._device_info['cuda_memory_free']}")
        print(f"   Requested Mode: {self.device_mode.upper()}")
        print("=" * 60)
        
        # Determine actual device configuration
        device_map = self._get_device_map()
        
        print("\n📦 Loading processor and tokenizer...")
        # If merged model, try loading from merged model path first
        # For LoRA adapter: use base model (from adapter_config) for processor/tokenizer
        model_path_to_load = merged_model_path if (is_merged_model and merged_model_path) else self.BASE_MODEL_NAME
        processor_model_path = model_path_to_load
        if use_hurricane and is_lora_adapter and adapter_config_path and os.path.exists(adapter_config_path):
            try:
                import json
                with open(adapter_config_path, "r", encoding="utf-8") as f:
                    adapter_cfg = json.load(f)
                    base_model_from_adapter = adapter_cfg.get("base_model_name_or_path")
                    if base_model_from_adapter:
                        processor_model_path = base_model_from_adapter
                        print(f"   📦 Using base model for processor/tokenizer: {processor_model_path}")
            except Exception as e:
                print(f"   ⚠️  Could not read adapter config for processor: {e}")
        
        try:
            # Load tokenizer FIRST with fix_mistral_regex=True to avoid warning
            # This prevents the warning from being shown when processor loads tokenizer
            try:
                print(f"   Loading tokenizer with fix_mistral_regex=True...")
                self.tokenizer = AutoTokenizer.from_pretrained(
                    processor_model_path,
                    trust_remote_code=True,
                    fix_mistral_regex=True  # Fix tokenizer regex pattern issue - MUST be set here
                )
                print(f"   ✓ Tokenizer loaded with fix_mistral_regex=True")
            except Exception as tokenizer_error:
                print(f"   ⚠️  Could not load tokenizer separately: {tokenizer_error}")
                self.tokenizer = None
            
            # Load processor (will use tokenizer if already loaded)
            self.processor = AutoProcessor.from_pretrained(
                processor_model_path,
                trust_remote_code=True
            )
            
            # If we loaded tokenizer separately, update processor's tokenizer
            if self.tokenizer is not None:
                self.processor.tokenizer = self.tokenizer
                print(f"   ✓ Processor loaded and updated with fixed tokenizer")
            else:
                # Fallback: use processor's tokenizer and try to fix it
                self.tokenizer = self.processor.tokenizer
                # Try to set fix_mistral_regex if available
                if hasattr(self.tokenizer, 'fix_mistral_regex'):
                    self.tokenizer.fix_mistral_regex = True
                print(f"   ✓ Processor loaded (using processor's tokenizer)")
            
            print(f"   ✓ Processor and tokenizer loaded from: {model_path_to_load}")
        except Exception as e:
            print(f"   ⚠️  Could not load from {model_path_to_load}: {e}")
            print(f"   ⚠️  Falling back to base model")
            try:
                # Load tokenizer FIRST with fix_mistral_regex=True
                try:
                    print(f"   Loading base model tokenizer with fix_mistral_regex=True...")
                    self.tokenizer = AutoTokenizer.from_pretrained(
                        self.BASE_MODEL_NAME,
                        trust_remote_code=True,
                        fix_mistral_regex=True  # Fix tokenizer regex pattern issue
                    )
                    print(f"   ✓ Base tokenizer loaded with fix_mistral_regex=True")
                except Exception as tokenizer_error:
                    print(f"   ⚠️  Could not load base tokenizer separately: {tokenizer_error}")
                    self.tokenizer = None
                
                # Load processor
                self.processor = AutoProcessor.from_pretrained(
                    self.BASE_MODEL_NAME, 
                    trust_remote_code=True
                )
                
                # Update processor's tokenizer if we loaded it separately
                if self.tokenizer is not None:
                    self.processor.tokenizer = self.tokenizer
                    print(f"   ✓ Base processor loaded and updated with fixed tokenizer")
                else:
                    self.tokenizer = self.processor.tokenizer
                    if hasattr(self.tokenizer, 'fix_mistral_regex'):
                        self.tokenizer.fix_mistral_regex = True
                    print(f"   ✓ Base processor loaded (using processor's tokenizer)")
            except Exception as fallback_error:
                print(f"   ❌ Failed to load base model: {fallback_error}")
                raise
        
        print("   ✓ Processor and tokenizer loaded successfully")
        
        print(f"\n🤖 Loading model in {self._mode_used.upper()} mode...")
        
        # Load model based on configuration
        if is_merged_model and merged_model_path:
            # Load merged model directly (no base model + LoRA needed)
            print(f"   📦 Loading merged model from: {merged_model_path}")
            if self._mode_used == "hybrid" and self._device_info['cuda_available']:
                self._load_merged_model(merged_model_path, device_map, mode="hybrid")
            elif self._mode_used == "gpu" and self._device_info['cuda_available']:
                self._load_merged_model(merged_model_path, device_map, mode="gpu")
            else:
                self._load_merged_model(merged_model_path, device_map="cpu", mode="cpu")
        else:
            # Load base model first, then LoRA adapter
            # Determine base model name for LoRA adapter
            base_model_name = self.BASE_MODEL_NAME
            if use_hurricane and is_lora_adapter:
                try:
                    import json
                    adapter_config_path = os.path.join(self.hurricane_model_path, "adapter_config.json")
                    with open(adapter_config_path, 'r', encoding='utf-8') as f:
                        adapter_config = json.load(f)
                        base_model_name = adapter_config.get('base_model_name_or_path', self.BASE_MODEL_NAME)
                        is_trocr_base = 'trocr' in base_model_name.lower()
                except Exception:
                    pass
            
            # Check for offline mode or local base model
            if USE_OFFLINE_MODE or LOCAL_BASE_MODEL_PATH:
                # Use local base model if provided
                if LOCAL_BASE_MODEL_PATH and os.path.exists(LOCAL_BASE_MODEL_PATH):
                    print(f"   📦 Using local base model: {LOCAL_BASE_MODEL_PATH}")
                    base_model_name = LOCAL_BASE_MODEL_PATH
                    # Detect TrOCR from the name only; every saved model directory has a
                    # config.json, so its presence says nothing about the architecture
                    is_trocr_base = 'trocr' in base_model_name.lower()
                elif USE_OFFLINE_MODE:
                    print(f"   📦 Offline mode enabled - using cached model: {base_model_name}")
                    print(f"   ⚠️  If model not cached, loading will fail")
            
            if self._mode_used == "hybrid" and self._device_info['cuda_available']:
                self._load_hybrid_mode(device_map, base_model_name=base_model_name, is_trocr=is_trocr_base)
            elif self._mode_used == "gpu" and self._device_info['cuda_available']:
                self._load_gpu_mode(device_map, base_model_name=base_model_name, is_trocr=is_trocr_base)
            else:
                self._load_cpu_mode(base_model_name=base_model_name, is_trocr=is_trocr_base)
            
            # Load Hurricane OCR LoRA adapter if available
            if use_hurricane and is_lora_adapter:
                self._load_hurricane_adapter()
        
        model_name = "Hurricane OCR" if self._using_hurricane else "Typhoon OCR"
        print(f"\n✅ {model_name} loaded successfully!")
        print(f"   Mode: {self._mode_used.upper()}")
        print(f"   Fine-tuned: {'Yes (LoRA)' if self._using_hurricane else 'No (Base)'}")
        if hasattr(self.model, 'hf_device_map'):
            devices_used = set(str(v) for v in self.model.hf_device_map.values())
            print(f"   Devices: {', '.join(devices_used)}")
        
        # Load YOLOv8 detector if enabled
        if self.use_detection:
            try:
                # Priority 1: Use HuggingFace model (HURRICANE_OD_MODEL_PATH)
                # Priority 2: Check for local HurricaneOD_beta
                # Priority 3: Fallback to pretrained YOLOv8n
                
                model_path_to_use = HURRICANE_OD_MODEL_PATH
                
                # If HuggingFace path not set, check for local models
                if not model_path_to_use:
                    hurricaneod_path = Path("hurricane_ocr_model/HurricaneOD_beta/HurricaneOD_beta.pt")
                    training_best_path = Path("HurricaneOD/HurricaneOD_beta/weights/best.pt")
                    training_last_path = Path("HurricaneOD/HurricaneOD_beta/weights/last.pt")
                    
                    if hurricaneod_path.exists():
                        model_path_to_use = str(hurricaneod_path)
                    elif training_best_path.exists():
                        model_path_to_use = str(training_best_path)
                    elif training_last_path.exists():
                        model_path_to_use = str(training_last_path)
                
                if model_path_to_use:
                    self.plate_detector = PlateDetector(
                        model_size="n", 
                        conf_threshold=0.25,
                        model_path=model_path_to_use
                    )
                else:
                    print(f"   ⚠️ HurricaneOD_beta model not found, using pretrained YOLOv8n")
                    print(f"   Expected: HuggingFace model or local paths")
                    print(f"     - HuggingFace: {HURRICANE_OD_MODEL_PATH}")
                    print(f"     - Local: hurricane_ocr_model/HurricaneOD_beta/HurricaneOD_beta.pt")
                    self.plate_detector = PlateDetector(model_size="n", conf_threshold=0.25)
                
                device = "cuda" if self._device_info['cuda_available'] else "cpu"
                self.plate_detector.load(device=device)
            except Exception as e:
                print(f"   ⚠️ Warning: Could not load YOLOv8 detector: {e}")
                print("   Continuing without detection (will process full image)")
                self.use_detection = False
        
        self._is_loaded = True
        
        elapsed = time.time() - start_time
        print(f"\n⏱️  Total loading time: {elapsed:.2f} seconds")
    
    def _load_hurricane_adapter(self) -> None:
        """Load Hurricane OCR LoRA adapter"""
        try:
            from peft import PeftModel
            
            print(f"\n🌀 Loading Hurricane OCR adapter from: {self.hurricane_model_path}")
            print(f"   ⚠️  Note: LoRA adapter requires base model to be loaded first")
            print(f"   💡 This is normal - LoRA adapters work on top of base models")
            print(f"   💡 Base model is cached by HuggingFace (~/.cache/huggingface/)")
            print(f"   💡 First load: downloads base model (~2-5GB, 30-60s)")
            print(f"   💡 Next loads: uses cached base model (~5-10s)")
            print(f"   💡 Tip: Use merge_and_unload() to create merged model (faster loading)")
            
            # Load the LoRA adapter
            self.model = PeftModel.from_pretrained(
                self.model,
                self.hurricane_model_path,
                is_trainable=False  # Inference only
            )
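            # PeftModel wraps the frozen base weights and applies the low-rank
            # LoRA deltas on top at inference time; is_trainable=False avoids
            # training-mode setup.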
            
            # Option to merge adapter for faster inference (saves merged model)
            # Uncomment the code below to merge and save merged model (one-time operation)
            # This will create a merged model that doesn't need base model loading
            # After merging, set HURRICANE_MODEL_PATH to the merged/ directory
            # 
            # print("   🔄 Merging LoRA adapter into base model...")
            # print("   ⏳ This may take a few minutes...")
            # merged_model = self.model.merge_and_unload()
            # merged_path = os.path.join(self.hurricane_model_path, "merged")
            # os.makedirs(merged_path, exist_ok=True)
            # print(f"   💾 Saving merged model to: {merged_path}")
            # merged_model.save_pretrained(merged_path)
            # self.processor.save_pretrained(merged_path)
            # print("   ✅ Merged model saved!")
            # print(f"   💡 Next time, set HURRICANE_MODEL_PATH to: {merged_path}")
            # print(f"   💡 This will load faster (no base model needed)")
            
            self._using_hurricane = True
            print("   ✓ Hurricane OCR adapter loaded successfully!")
            
        except ImportError:
            print("   ⚠️ PEFT not installed. Install with: pip install peft")
            print("   Using base Typhoon OCR model instead.")
        except Exception as e:
            print(f"   ⚠️ Failed to load Hurricane adapter: {e}")
            print("   Using base Typhoon OCR model instead.")
    
    def _get_device_map(self) -> Any:
        """Determine device map based on mode"""
        if self.device_mode == "cpu":
            self._mode_used = "cpu"
            self._device = "cpu"
            return "cpu"
        
        if not self._device_info['cuda_available']:
            print("   ⚠️ GPU not available, falling back to CPU")
            self._mode_used = "cpu"
            self._device = "cpu"
            return "cpu"
        
        if self.device_mode == "gpu":
            self._mode_used = "gpu"
            self._device = "cuda"
            return "cuda:0"
        
        if self.device_mode == "hybrid":
            self._mode_used = "hybrid"
            self._device = "cuda"
            # Auto device map with memory limits
            return "auto"
        
        # Auto mode
        self._mode_used = "gpu" if self._device_info['cuda_available'] else "cpu"
        self._device = "cuda" if self._device_info['cuda_available'] else "cpu"
        return "auto" if self._device_info['cuda_available'] else "cpu"
    
    def _load_hybrid_mode(self, device_map, base_model_name=None, is_trocr=False):
        """Load model in hybrid GPU+CPU mode"""
        print("   Using HYBRID mode (GPU + CPU)")
        print(f"   Max GPU Memory: {self.max_gpu_memory}")
        
        model_name = base_model_name or self.BASE_MODEL_NAME
        model_class = AutoModelForVision2Seq if (is_trocr and AutoModelForVision2Seq is not None) else AutoModelForImageTextToText

        if self.use_8bit:
            if BitsAndBytesConfig is None:
                raise ImportError("bitsandbytes or compatible BitsAndBytesConfig not available. Install bitsandbytes and retry (pip install bitsandbytes).")
            print("   Using 4-bit quantization (NF4) + CPU offloading")
            bnb_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_compute_dtype=torch.bfloat16,
                bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type="nf4",
                llm_int8_enable_fp32_cpu_offload=True  # Enable CPU offloading
            )
            
            self.model = model_class.from_pretrained(
                model_name,
                quantization_config=bnb_config,
                device_map=device_map,
                max_memory={0: self.max_gpu_memory, "cpu": "4GB"},
                trust_remote_code=True,
                offload_folder="offload_weights"
            )
        else:
            print("   Using float16 + CPU offloading")
            self.model = model_class.from_pretrained(
                model_name,
                device_map=device_map,
                max_memory={0: self.max_gpu_memory, "cpu": "4GB"},
                trust_remote_code=True,
                dtype=torch.float16,
                offload_folder="offload_weights"
            )
    
    def _load_gpu_mode(self, device_map, base_model_name=None, is_trocr=False):
        """Load model on GPU only"""
        print("   Using GPU-only mode")
        
        model_name = base_model_name or self.BASE_MODEL_NAME
        model_class = AutoModelForVision2Seq if (is_trocr and AutoModelForVision2Seq is not None) else AutoModelForImageTextToText
        
        # Prepare loading kwargs
        load_kwargs = {
            "device_map": device_map,
            "trust_remote_code": True
        }
        
        # Add offline mode if enabled
        if USE_OFFLINE_MODE:
            load_kwargs["local_files_only"] = True
            print("   📦 Offline mode: local_files_only=True")
        
        if self.use_8bit:
            if BitsAndBytesConfig is None:
                raise ImportError("bitsandbytes or compatible BitsAndBytesConfig not available. Install bitsandbytes and retry (pip install bitsandbytes).")
            print("   Using 4-bit quantization (NF4)")
            bnb_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_compute_dtype=torch.bfloat16,
                bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type="nf4",
                llm_int8_enable_fp32_cpu_offload=False
            )
            load_kwargs["quantization_config"] = bnb_config
            self.model = model_class.from_pretrained(model_name, **load_kwargs)
        else:
            print("   Using float16 precision")
            load_kwargs["dtype"] = torch.float16
            self.model = model_class.from_pretrained(model_name, **load_kwargs)
    
    def _load_merged_model(self, model_path: str, device_map: Any, mode: str = "auto"):
        """Load merged model directly (no base model + LoRA needed)"""
        print(f"   Using merged model (no base model loading needed)")
        
        if mode == "hybrid" and self._device_info['cuda_available']:
            print("   Using HYBRID mode (GPU + CPU)")
            print(f"   Max GPU Memory: {self.max_gpu_memory}")
            self.model = AutoModelForImageTextToText.from_pretrained(
                model_path,
                device_map=device_map,
                max_memory={0: self.max_gpu_memory, "cpu": "4GB"},
                trust_remote_code=True,
                dtype=torch.float16,
                offload_folder="offload_weights"
            )
        elif mode == "gpu" and self._device_info['cuda_available']:
            print("   Using GPU-only mode")
            print("   Using float16 precision")
            self.model = AutoModelForImageTextToText.from_pretrained(
                model_path,
                device_map=device_map,
                trust_remote_code=True,
                dtype=torch.float16
            )
        else:
            print("   Using CPU-only mode")
            print("   Using float32 precision")
            self.model = AutoModelForImageTextToText.from_pretrained(
                model_path,
                device_map="cpu",
                trust_remote_code=True,
                dtype=torch.float32
            )
        
        self._using_hurricane = True
    
    def _load_cpu_mode(self, base_model_name=None, is_trocr=False):
        """Load model on CPU only"""
        print("   Using CPU-only mode")
        print("   Using float32 precision")
        self._mode_used = "cpu"
        self._device = "cpu"
        
        model_name = base_model_name or self.BASE_MODEL_NAME
        model_class = AutoModelForVision2Seq if (is_trocr and AutoModelForVision2Seq is not None) else AutoModelForImageTextToText
        
        # Prepare loading kwargs
        load_kwargs = {
            "device_map": "cpu",
            "trust_remote_code": True,
            "dtype": torch.float32
        }
        
        # Add offline mode if enabled
        if USE_OFFLINE_MODE:
            load_kwargs["local_files_only"] = True
            print("   📦 Offline mode: local_files_only=True")
        
        self.model = model_class.from_pretrained(model_name, **load_kwargs)
    
    @property
    def device(self) -> str:
        """Get current device"""
        return self._device
    
    @property
    def mode_used(self) -> str:
        """Get actual mode being used"""
        return self._mode_used
    
    @property
    def device_info(self) -> dict:
        """Get device information"""
        return self._device_info
    
    def resize_image(self, img: Image.Image, max_size: Optional[int] = None) -> Image.Image:
        """
        Resize image if it exceeds max dimensions
        
        Args:
            img: PIL Image object
            max_size: Maximum dimension size (default: MAX_IMAGE_SIZE)
            
        Returns:
            Resized PIL Image
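
        Example (illustrative):
            A 4000x3000 input with max_size=2048 scales by 2048/4000 = 0.512,
            giving a 2048x1536 output; aspect ratio is preserved and LANCZOS
            resampling keeps glyph edges sharp.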
        """
        if max_size is None:
            max_size = self.MAX_IMAGE_SIZE
            
        width, height = img.size
        
        if width > max_size or height > max_size:
            if width >= height:
                scale = max_size / float(width)
                new_size = (max_size, int(height * scale))
            else:
                scale = max_size / float(height)
                new_size = (int(width * scale), max_size)
            
            img = img.resize(new_size, Image.Resampling.LANCZOS)
            print(f"Original size: {width, height} ==> Resized to: {img.size}")
        
        return img
    
    def perform_ocr(
        self,
        image: Image.Image,
        prompt: Optional[str] = None,
        max_new_tokens: int = 128,  # Capped at 128 for lower RAM (enough for license plates)
        return_detection_info: bool = False,
        cropped_image: Optional[Image.Image] = None,
        detection_info: Optional[Dict[str, Any]] = None
    ) -> str:
        """
        Perform OCR on an image with optional YOLOv8 plate detection
        
        Pipeline:
        1. Step 1: License Plate Detection
           - If cropped_image is provided: Use it directly (detection already done in app.py)
           - If cropped_image is None: Run HurricaneOD_beta detection to crop plate
           - Result: img_to_ocr (cropped plate or full image)
        2. Step 2: OCR Processing
           - Resize image if needed
           - Prepare prompt and messages
           - Run Hurricane OCR model to extract text
           - Result: OCR text output
        
        Note: This function is called from app.py which already does detection.
              If cropped_image is passed, it will be used directly (no duplicate detection).
        
        Args:
            image: PIL Image object (original full image)
            prompt: Custom prompt for OCR (default: Thai license plate extraction)
            max_new_tokens: Maximum tokens to generate (capped internally at 128)
            return_detection_info: If True, return tuple (ocr_text, detection_info)
            cropped_image: Pre-cropped plate image (optional, to avoid duplicate detection)
            detection_info: Pre-computed detection info (optional, to avoid duplicate detection)
            
        Returns:
            OCR result text, or tuple (text, detection_info) if return_detection_info=True
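
        Example (illustrative; assumes load() was already called):
            text, info = model.perform_ocr(img, return_detection_info=True)
            # info carries: detected, bbox, confidence, used_full_image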
        """
        if not self._is_loaded:
            raise RuntimeError("Model not loaded. Call load() first.")
        
        # Use provided detection info or initialize default
        if detection_info is None:
            detection_info = {
                "detected": False,
                "bbox": None,
                "confidence": None,
                "used_full_image": True
            }
        
        # Step 1: Use pre-cropped image if provided, otherwise detect and crop
        print("   🔄 Step 1: License Plate Detection...")
        if cropped_image is not None:
            # Use pre-cropped image (detection already done)
            img_to_ocr = cropped_image.convert("RGB")
            if detection_info.get("detected", False):
                print(f"   🔍 Using pre-cropped plate (confidence: {detection_info.get('confidence', 0):.2f})")
        else:
            # Detect and crop license plate if detection is enabled
            img_to_ocr = image.convert("RGB")
            
            if self.use_detection and self.plate_detector is not None:
                try:
                    print("   🔍 Running HurricaneOD_beta detection...")
                    cropped_plate, detections = self.plate_detector.detect_and_crop(img_to_ocr, return_all=False)
                    
                    if cropped_plate is not None and len(detections) > 0:
                        # Use cropped plate for OCR
                        img_to_ocr = cropped_plate
                        detection_info = {
                            "detected": True,
                            "bbox": detections[0]["bbox"],
                            "confidence": detections[0]["confidence"],
                            "used_full_image": False,
                            "all_detections": detections
                        }
                        print(f"   ✅ Detected plate (confidence: {detections[0]['confidence']:.2f})")
                        print(f"   📐 Bounding box: {detections[0]['bbox']}")
                    else:
                        print("   ⚠️ No plate detected, using full image")
                        detection_info["used_full_image"] = True
                except Exception as e:
                    print(f"   ⚠️ Detection error: {e}, using full image")
                    detection_info["used_full_image"] = True
            else:
                print("   ⚠️ Detection disabled or detector not available, using full image")
                detection_info["used_full_image"] = True
        
        print(f"   ✓ Step 1 completed - Image size: {img_to_ocr.size}")
        
        # Step 2: Perform OCR on (cropped) image
        try:
            print("   🔄 Starting OCR processing...")
            
            # Check if model and processor are loaded
            if self.model is None:
                raise RuntimeError("Model is not loaded. Please load the model first.")
            if self.processor is None:
                raise RuntimeError("Processor is not loaded. Please load the processor first.")
            
            # Resize image if needed (reduce size for faster processing)
            print("   🔄 Resizing image if needed...")
            # Cap at 1024px for faster OCR; pass max_size explicitly instead of
            # temporarily mutating the shared MAX_IMAGE_SIZE class attribute
            img = self.resize_image(img_to_ocr, max_size=min(self.MAX_IMAGE_SIZE, 1024))
            print(f"   ✓ Image ready for OCR (size: {img.size})")
            
            # Detect model type (TrOCR or Vision-Language)
            is_trocr = False
            model_class_name = self.model.__class__.__name__
            if "Vision2Seq" in model_class_name or "TrOCR" in model_class_name:
                is_trocr = True
            elif hasattr(self.processor, 'apply_chat_template'):
                # Check processor type
                processor_class_name = self.processor.__class__.__name__
                if "TrOCR" in processor_class_name:
                    is_trocr = True
            
            if is_trocr:
                # TrOCR format: Direct image processing (no chat template)
                print("   🔄 Processing with TrOCR format (direct image-to-text)...")
                inputs = self.processor(images=img, return_tensors="pt")
            else:
                # Vision-Language format (Qwen3VL/Typhoon): Chat template
                # Default prompt (Thai): asks the model to read the plate and report
                # plate number, Thai characters, digits, province, vehicle type, and plate color
                if prompt is None:
                    prompt = """อ่านและดึงข้อมูลจากรูปป้ายทะเบียนรถไทยนี้ โดยระบุข้อมูลต่อไปนี้:
- เลขทะเบียน (Plate Number): ตัวอักษรและตัวเลขบนป้าย เช่น "กก 1234" หรือ "1กก 5678"
- ตัวอักษร (Characters): ส่วนตัวอักษรไทย เช่น "กก" หรือ "1กก"  
- ตัวเลข (Digits): ส่วนตัวเลข เช่น "1234"
- จังหวัด (Province): ชื่อจังหวัดบนป้าย เช่น "กรุงเทพมหานคร"
- ประเภทรถ (Vehicle Type): ถ้ามีระบุ
- สีป้าย (Plate Color): ขาว, เขียว, เหลือง, แดง ฯลฯ

กรุณาอ่านข้อความทั้งหมดที่เห็นบนป้ายทะเบียน: """
                
                print("   🔄 Preparing messages...")
                messages = [
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "image",
                                "image": img,
                            },
                            {
                                "type": "text",
                                "text": prompt
                            }
                        ],
                    }
                ]
                
                # Prepare inputs
                print("   🔄 Applying chat template and tokenizing...")
                inputs = self.processor.apply_chat_template(
                    messages,
                    tokenize=True,
                    add_generation_prompt=True,
                    return_dict=True,
                    return_tensors="pt"
                )
            
            # Move inputs to model device
            # Get actual model device (handle device_map case)
            try:
                if hasattr(self.model, 'hf_device_map'):
                    # Model is using device_map, get first device
                    first_device = list(self.model.hf_device_map.values())[0]
                    if isinstance(first_device, torch.device):
                        model_device = first_device
                    else:
                        model_device = torch.device(first_device)
                else:
                    model_device = next(self.model.parameters()).device
            except Exception:
                # Fallback: try to get device from model
                try:
                    model_device = self.model.device if hasattr(self.model, 'device') else torch.device('cpu')
                except Exception:
                    model_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            
            print(f"   🔄 Moving inputs to device: {model_device}")
            inputs = {k: v.to(model_device) if isinstance(v, torch.Tensor) else v for k, v in inputs.items()}
            print("   ✓ Inputs prepared")

            # Only remove image_grid_thw for TrOCR models (they don't use it)
            # Vision-Language models (Qwen3VL/Typhoon) NEED image_grid_thw!
            if is_trocr and "image_grid_thw" in inputs:
                inputs.pop("image_grid_thw")
                print("   🔍 Removed unused key: image_grid_thw (TrOCR model)")
            
            # Generate output with optimized parameters for speed
            print(f"   🔄 Generating OCR output (max_new_tokens={min(max_new_tokens, 128)})...")
            print("   ⏳ This may take a while...")
            
            # Check model device
            print(f"   📍 Model device: {model_device}")
            if model_device.type == 'cpu':
                print("   ⚠️  WARNING: Model is on CPU! This will be VERY slow.")
                print("   💡 Consider using GPU mode for faster inference")
            
            # Use optimized generation parameters for faster inference
            # (time is already imported at module level)
            gen_start = time.time()
            
            # Get tokenizer eos and pad tokens
            tokenizer = self.processor.tokenizer
            pad_token_id = getattr(tokenizer, 'pad_token_id', None)
            if pad_token_id is None:  # fall back to EOS; `or` would wrongly skip a pad id of 0
                pad_token_id = getattr(tokenizer, 'eos_token_id', None)
            eos_token_id = getattr(tokenizer, 'eos_token_id', None)
            
            with torch.no_grad():  # Disable gradient computation for faster inference
                # Use only valid parameters for Qwen3VL model
                # Note: early_stopping, temperature, top_p, top_k are not valid for greedy decoding (do_sample=False, num_beams=1)
                generation_kwargs = {
                    "max_new_tokens": min(max_new_tokens, 128),  # Capped at 128 for lower RAM (enough for license plates)
                    "do_sample": False,  # Use greedy decoding (faster than sampling)
                    "use_cache": True,  # Enable KV cache for faster generation
                    "num_beams": 1,  # Greedy search (fastest)
                }
                
                # Add token IDs if available
                if pad_token_id is not None:
                    generation_kwargs["pad_token_id"] = pad_token_id
                if eos_token_id is not None:
                    generation_kwargs["eos_token_id"] = eos_token_id
                
                generated_ids = self.model.generate(**inputs, **generation_kwargs)
            
            gen_elapsed = time.time() - gen_start
            print(f"   ✓ Generation completed in {gen_elapsed:.2f} seconds")
            if gen_elapsed > 60:
                print(f"   ⚠️  WARNING: Generation took {gen_elapsed:.2f} seconds (>1 minute)")
                print(f"   💡 This is unusually slow. Check if model is on GPU.")
            
            # Decode output based on model type
            print("   🔄 Decoding output...")
            if is_trocr:
                # TrOCR: Direct decode
                ocr_result = self.processor.decode(generated_ids[0], skip_special_tokens=True)
            else:
                # Vision-Language: generate() returns prompt + completion tokens for
                # decoder-only models, so trim off the prompt before decoding
                input_ids = inputs['input_ids'] if isinstance(inputs, dict) else inputs.input_ids
                generated_ids_trimmed = [
                    out_ids[len(in_ids):] 
                    for in_ids, out_ids in zip(input_ids, generated_ids)
                ]
                output_text = self.processor.batch_decode(
                    generated_ids_trimmed, 
                    skip_special_tokens=True, 
                    clean_up_tokenization_spaces=False
                )
                ocr_result = output_text[0]
                # Extract assistant response if present
                if "<|assistant|>" in ocr_result:
                    ocr_result = ocr_result.split("<|assistant|>")[-1].strip()
            print(f"   ✓ OCR completed successfully")
            print(f"   📝 OCR Result length: {len(ocr_result)} characters")
            
        except Exception as e:
            import traceback
            error_details = traceback.format_exc()
            print(f"   ❌ Error in Step 2 (OCR): {e}")
            print(f"   📋 Error details:\n{error_details}")
            # Return error message instead of crashing
            ocr_result = f"[OCR ERROR] {str(e)}\n\nError details:\n{error_details}"
        
        # Memory cleanup after inference (reduce RAM usage across requests)
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        import gc
        gc.collect()
        
        # Return result with optional detection info
        if return_detection_info:
            return ocr_result, detection_info
        return ocr_result
    
    @property
    def is_loaded(self) -> bool:
        """Check if model is loaded"""
        return self._is_loaded


# Global model instance for Gradio app
_global_model: Optional[OCRModel] = None

def get_model(device_mode: Optional[str] = None, use_8bit: bool = True) -> OCRModel:
    """Get or create global model instance"""
    global _global_model
    if _global_model is None:
        _global_model = OCRModel(device_mode=device_mode, use_8bit=use_8bit)
    return _global_model

def load_model(device_mode: Optional[str] = None, use_8bit: bool = True) -> OCRModel:
    """
    Load and return global model instance
    
    Args:
        device_mode: "auto", "gpu", "cpu", or "hybrid" (default: uses global DEVICE_MODE)
        use_8bit: Whether to quantize the model (despite the name, loads 4-bit NF4; saves VRAM)
    
    Device Modes:
        - "auto": Use GPU if available, fallback to CPU
        - "gpu": Force GPU only
        - "cpu": Force CPU only  
        - "hybrid": GPU + CPU working together (recommended for limited VRAM)
    """
    model = get_model(device_mode=device_mode, use_8bit=use_8bit)
    if not model.is_loaded:
        model.load()
    return model
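
# Usage sketch (hypothetical image path; a minimal end-to-end example):
#   from PIL import Image
#   model = load_model(device_mode="hybrid", use_8bit=True)
#   text, info = model.perform_ocr(Image.open("samples/plate.jpg"),
#                                  return_detection_info=True)
#   print(text, info["detected"], info.get("confidence"))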


def check_gpu() -> str:
    """Check GPU availability and return status message"""
    info = get_device_info()
    
    if info["cuda_available"]:
        return f"""✅ GPU Available!
        
Device: {info['cuda_device_name']}
VRAM Total: {info['cuda_memory_total']}
VRAM Free: {info['cuda_memory_free']}
CPU Cores: {info['cpu_count']}

Recommended Modes:
- VRAM >= 8GB: Use "gpu" mode
- VRAM 4-8GB: Use "hybrid" mode  
- No GPU: Use "cpu" mode"""
    else:
        return f"""❌ GPU Not Available

CPU Cores: {info['cpu_count']}
Mode: CPU only (slower)

To use GPU, please install:
1. NVIDIA GPU drivers
2. CUDA Toolkit
3. PyTorch with CUDA support"""
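
# Quick check (safe on CPU-only machines):
#   print(check_gpu())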