# ============================================================================
# ENGLISH LINGUISTICS HUB (CONSOLIDATED APP V24-EN)
#
# This script provides a comprehensive Linguistics Hub for English analysis,
# adding NLTK, Stanza, TextBlob, HanTa(EN), OEWN, and OpenBLP.
# It maintains the exact same JSON output structure as the German app.
#
# ============================================================================
# TABS & FUNCTIONALITY:
# ============================================================================
#
# --- PRIMARY TABS ---
#
# 1. Word Encyclopedia (EN):
#    - NON-CONTEXTUAL analysis of single words.
#    - Multi-engine dispatcher with user selection and automatic fallback:
#      (Wiktionary -> HanTa -> Stanza -> NLTK -> TextBlob)
#    - Aggregates all grammatical (Wiktionary, Pattern) and semantic
#      (Wiktionary, OEWN, OpenBLP, ConceptNet) possibilities.
#
# 2. Comprehensive Analyzer (EN):
#    - CONTEXTUAL analysis of full sentences.
#    - Uses the Word Encyclopedia's dispatcher for robust lemma analysis.
#    - Ranks all semantic senses (Wiktionary, OEWN) by relevance.
#
# ============================================================================
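
# The engine fallback described above amounts to an ordered loop over analyzer
# callables. Illustrative sketch only (the names are placeholders, not the
# actual dispatcher defined later in this file):
#
#     def dispatch(word, engines):
#         for name, analyze in engines:
#             try:
#                 result = analyze(word)
#                 if result:
#                     return name, result
#             except Exception:
#                 continue  # fall through to the next engine
#         return None, None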

# ============================================================================
# 1. CONSOLIDATED IMPORTS
# ============================================================================
import gradio as gr
import spacy
from spacy import displacy
import base64
import traceback
import subprocess
import sys
import os
from pathlib import Path
import importlib
import site
import threading
import queue
from dataclasses import dataclass
from enum import Enum
from typing import Dict, Any, List, Set, Optional, Tuple
import requests
import zipfile
import re
import sqlite3
import json
from huggingface_hub import hf_hub_download
import gzip
import shutil

# --- Requests and gradio Import (for ConceptNet) ---
try:
    import requests
    from requests.exceptions import RequestException, HTTPError, ConnectionError, Timeout
    REQUESTS_AVAILABLE = True
    print("βœ“ Successfully imported requests.")
except ImportError:
    REQUESTS_AVAILABLE = False
    print("CRITICAL WARNING: `requests` library not found.")

try:
    from gradio_client import Client
    GRADIO_CLIENT_AVAILABLE = True
except ImportError:
    GRADIO_CLIENT_AVAILABLE = False
    print("CRITICAL WARNING: `gradio_client` library not found.")

# --- LanguageTool Import ---
try:
    import language_tool_python
    LT_AVAILABLE = True
    print("βœ“ Successfully imported language_tool")
except ImportError:
    LT_AVAILABLE = False
    print("CRITICAL WARNING: `language-tool-python` library not found.")

# --- WordNet (wn) Import (for OEWN) ---
try:
    import wn
    WN_AVAILABLE = True
    print("βœ“ Successfully imported wordnet (for OEWN)")
except ImportError:
    WN_AVAILABLE = False
    print("CRITICAL WARNING: `wn` library not found.")

# --- Pattern.en Import (ENGLISH) ---
PATTERN_EN_AVAILABLE = False

# Define constants locally as fallbacks, using Pattern's own string values
# ("infinitive", "present", ...) so conjugate() still works if the constants
# import below fails.
# Tenses
INFINITIVE = "infinitive"
PRESENT = "present"
PAST = "past"
FUTURE = "future"
PARTICIPLE = "participle"
# Person/Number
FIRST = 1
SECOND = 2
THIRD = 3
SINGULAR = "singular"
PLURAL = "plural"
# POS
NOUN = "NN"
VERB = "VB"
ADJECTIVE = "JJ"

try:
    print("Trying to import pattern.en")
    import pattern.en
    # Import functions safely
    from pattern.en import (
        pluralize, singularize, 
        conjugate, lemma, lexeme, tenses,
        comparative, superlative,
        predicative, attributive,
        article,
        parse, split
    )
    
    # Try to import constants, but don't fail if they are missing (we use fallbacks)
    print("Trying to import pattern constants.")
    try:
        from pattern.en import (
            INFINITIVE, PRESENT, PAST, PARTICIPLE,
            FIRST, SECOND, THIRD, SINGULAR, PLURAL,
            NOUN, VERB, ADJECTIVE
        )
    except ImportError:
        print("Using local fallback constants for Pattern.en")

    PATTERN_EN_AVAILABLE = True
    print("βœ“ Successfully imported pattern.en")

except ImportError:
    print("Using PatternLite fallback logic...")
    try:
        # Attempt simple import for PatternLite structure
        import pattern.en
        from pattern.en import pluralize, singularize, conjugate, lemma, lexeme
        
        # PatternLite may omit the gradation helpers; import them only if present
        if 'comparative' in dir(pattern.en):
            from pattern.en import comparative, superlative
            
        PATTERN_EN_AVAILABLE = True
        print("βœ“ Successfully imported pattern.en (via PatternLite)")
    except ImportError as e:
        PATTERN_EN_AVAILABLE = False
        print(f"CRITICAL WARNING: `pattern.en` library not found: {e}")

# --- HanTa Tagger Import (for EN) ---
try:
    from HanTa.HanoverTagger import HanoverTagger
    import HanTa.HanoverTagger
    sys.modules['HanoverTagger'] = HanTa.HanoverTagger
    HANTA_AVAILABLE = True
    print("βœ“ Successfully imported HanTa")
except ImportError:
    HANTA_AVAILABLE = False
    print("CRITICAL WARNING: `HanTa` library not found.")

# --- NLTK & TextBlob Import ---
try:
    import nltk
    from nltk.corpus import wordnet as nltk_wn
    from nltk.stem import WordNetLemmatizer
    
    # --- CRITICAL: Download required NLTK data ---
    # These specific packages prevent the common "LookupError" (NLTK) and
    # "MissingCorpusError" (TextBlob) failures at runtime.
    print("Downloading NLTK data...")
    _nltk_packages = [
        'wordnet', 
        'omw-1.4', 
        'averaged_perceptron_tagger', 
        'averaged_perceptron_tagger_eng', # Specific for newer NLTK
        'punkt', 
        'punkt_tab' # Specific for newer TextBlob/NLTK
    ]
    for pkg in _nltk_packages:
        try:
            nltk.download(pkg, quiet=True)
        except Exception as e:
            print(f"Warning: Failed to download NLTK package '{pkg}': {e}")

    NLTK_AVAILABLE = True
    print("βœ“ Successfully imported nltk and downloaded data")
except ImportError:
    NLTK_AVAILABLE = False
    print("WARNING: `nltk` library not found.")
except Exception as e:
    NLTK_AVAILABLE = False
    print(f"WARNING: `nltk` data download failed: {e}")

try:
    from textblob import TextBlob
    TEXTBLOB_AVAILABLE = True
    print("βœ“ Successfully imported textblob")
except ImportError:
    TEXTBLOB_AVAILABLE = False
    print("WARNING: `textblob` library not found.")

# --- Stanza Import ---
try:
    import stanza
    STANZA_AVAILABLE = True
    print("βœ“ Successfully imported stanza")
except ImportError:
    STANZA_AVAILABLE = False
    print("WARNING: `stanza` library not found.")


# --- German-specific imports are not needed ---
IWNLP_AVAILABLE = False
DWDSMOR_AVAILABLE = False

# ============================================================================
# 2. SHARED GLOBALS & CONFIG
# ============================================================================
VERBOSE = True
def log(msg):
    if VERBOSE:
        print(f"[DEBUG] {msg}")

# --- Wiktionary Cache & Lock (ENGLISH) ---
WIKTIONARY_REPO_ID = "cstr/en-wiktionary-sqlite-full"
WIKTIONARY_REMOTE_FILE = "en_wiktionary_normalized_full.db.gz" # Compressed DB file as published in the repo
WIKTIONARY_DB_PATH = "en_wiktionary_normalized.db" # Local extracted file
WIKTIONARY_CONN: Optional[sqlite3.Connection] = None
WIKTIONARY_CONN_LOCK = threading.Lock()
WIKTIONARY_AVAILABLE = False

# --- ConceptNet Cache & Lock ---
CONCEPTNET_CACHE: Dict[Tuple[str, str], Any] = {}
CONCEPTNET_LOCK = threading.Lock()
CONCEPTNET_CLIENT: Optional[Client] = None
CONCEPTNET_CLIENT_LOCK = threading.Lock()

# --- HanTa Tagger Cache & Lock (for EN) ---
HANTA_TAGGER_EN: Optional[HanoverTagger] = None
HANTA_TAGGER_LOCK = threading.Lock()

# --- Stanza Cache & Lock (for EN) ---
STANZA_PIPELINE_EN: Optional[stanza.Pipeline] = None
STANZA_PIPELINE_LOCK = threading.Lock()

# --- NLTK Cache & Lock (for EN) ---
NLTK_LEMMATIZER: Optional[WordNetLemmatizer] = None
NLTK_LEMMATIZER_LOCK = threading.Lock()

# --- Helper ---
def _html_wrap(content: str, line_height: str = "2.0") -> str:
    return f'<div style="overflow-x:auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; line-height: {line_height};">{content}</div>'

# --- Helper for SVA (ENGLISH) ---
def _conjugate_to_person_number_en(verb_lemma: str, person: str, number: str) -> Optional[str]:
    """
    Return a present tense finite form for given person/number (English).
    person in {'1','2','3'}, number in {'sg','pl'}.
    """
    if not PATTERN_EN_AVAILABLE:
        return None
    try:
        p_num = int(person)
        # Use the constants defined in the import block
        n_num = SINGULAR if number == 'sg' else PLURAL
        
        # Explicitly name arguments for safety across Pattern versions
        return conjugate(verb_lemma, tense=PRESENT, person=p_num, number=n_num)
    except Exception:
        return None
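
# Usage examples (illustrative, assuming pattern.en is installed and behaves
# as documented):
#   _conjugate_to_person_number_en("be", "3", "sg")  -> "is"
#   _conjugate_to_person_number_en("go", "3", "sg")  -> "goes"
#   _conjugate_to_person_number_en("go", "1", "pl")  -> "go"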

# ============================================================================
# 3. SPACY ANALYZER LOGIC
# ============================================================================
# --- Globals & Config for spaCy (Updated for English focus) ---
SPACY_MODEL_INFO: Dict[str, Tuple[str, str, str]] = {
    "en": ("English", "en_core_web_md", "spacy"),
    "de": ("German", "de_core_news_md", "spacy"),
    "es": ("Spanish", "es_core_news_md", "spacy"),
    "grc-proiel-trf": ("Ancient Greek (PROIEL TRF)", "grc_proiel_trf", "grecy"),
    "grc-perseus-trf": ("Ancient Greek (Perseus TRF)", "grc_perseus_trf", "grecy"),
    "grc_ner_trf": ("Ancient Greek (NER TRF)", "grc_ner_trf", "grecy"),
    "grc-proiel-lg": ("Ancient Greek (PROIEL LG)", "grc_proiel_lg", "grecy"),
    "grc-perseus-lg": ("Ancient Greek (Perseus LG)", "grc_perseus_lg", "grecy"),
    "grc-proiel-sm": ("Ancient Greek (PROIEL SM)", "grc_proiel_sm", "grecy"),
    "grc-perseus-sm": ("Ancient Greek (Perseus SM)", "grc_perseus_sm", "grecy"),
}
SPACY_UI_TEXT = {
    "de": {
        "title": "# πŸ” Mehrsprachiger Morpho-Syntaktischer Analysator",
        "subtitle": "Analysieren Sie Texte auf Deutsch, Englisch, Spanisch und Altgriechisch",
        "ui_lang_label": "BenutzeroberflΓ€chensprache",
        "model_lang_label": "Textsprache fΓΌr Analyse",
        "input_label": "Text eingeben",
        "input_placeholder": "Geben Sie hier Ihren Text ein...",
        "button_text": "Text analysieren",
        "button_processing_text": "Verarbeitung lΓ€uft...",
        "tab_graphic": "Grafische Darstellung",
        "tab_table": "Tabelle",
        "tab_json": "JSON",
        "tab_ner": "EntitΓ€ten",
        "html_label": "AbhΓ€ngigkeitsparsing",
        "table_label": "Morphologische Analyse",
        "table_headers": ["Wort", "Lemma", "POS", "Tag", "Morphologie", "AbhΓ€ngigkeit"],
        "json_label": "JSON-Ausgabe",
        "ner_label": "Benannte EntitΓ€ten",
        "error_message": "Fehler: "
    },
    "en": {
        "title": "# πŸ” Multilingual Morpho-Syntactic Analyzer",
        "subtitle": "Analyze texts in German, English, Spanish, and Ancient Greek",
        "ui_lang_label": "Interface Language",
        "model_lang_label": "Text Language for Analysis",
        "input_label": "Enter Text",
        "input_placeholder": "Enter your text here...",
        "button_text": "Analyze Text",
        "button_processing_text": "Processing...",
        "tab_graphic": "Graphic View",
        "tab_table": "Table",
        "tab_json": "JSON",
        "tab_ner": "Entities",
        "html_label": "Dependency Parsing",
        "table_label": "Morphological Analysis",
        "table_headers": ["Word", "Lemma", "POS", "Tag", "Morphology", "Dependency"],
        "json_label": "JSON Output",
        "ner_label": "Named Entities",
        "error_message": "Error: "
    },
    "es": {
        "title": "# πŸ” Analizador Morfo-SintΓ‘ctico MultilingΓΌe",
        "subtitle": "Analice textos en alemΓ‘n, inglΓ©s, espaΓ±ol y griego antiguo",
        "ui_lang_label": "Idioma de la Interfaz",
        "model_lang_label": "Idioma del Texto para AnΓ‘lisis",
        "input_label": "Introducir Texto",
        "input_placeholder": "Ingrese su texto aquΓ­...",
        "button_text": "Analizar Texto",
        "button_processing_text": "Procesando...",
        "tab_graphic": "Vista GrΓ‘fica",
        "tab_table": "Tabla",
        "tab_json": "JSON",
        "tab_ner": "Entidades",
        "html_label": "AnΓ‘lisis de Dependencias",
        "table_label": "AnΓ‘lisis MorfolΓ³gico",
        "table_headers": ["Palabra", "Lema", "POS", "Etiqueta", "MorfologΓ­a", "Dependencia"],
        "json_label": "Salida JSON",
        "ner_label": "Entidades Nombradas",
        "error_message": "Error: "
    }
}
SPACY_MODELS: Dict[str, Optional[spacy.Language]] = {}

# --- Dependency Installation & Model Loading ---
def spacy_install_spacy_transformers_once():
    """ Installs spacy-transformers, required for all _trf models. """
    marker_file = Path(".spacy_transformers_installed")
    if marker_file.exists():
        print("βœ“ spacy-transformers already installed (marker found)")
        return True
    print("Installing spacy-transformers (for _trf models)...")
    cmd = [sys.executable, "-m", "pip", "install", "spacy-transformers"]
    try:
        subprocess.run(cmd, capture_output=True, text=True, check=True, timeout=900)
        print("βœ“ Successfully installed spacy-transformers")
        marker_file.touch()
        return True
    except Exception as e:
        print(f"βœ— FAILED to install spacy-transformers: {e}")
        return False

def spacy_install_grecy_model_from_github(model_name: str) -> bool:
    """ Installs a greCy model from GitHub Release. """
    marker_file = Path(f".{model_name}_installed")
    if marker_file.exists():
        print(f"βœ“ {model_name} already installed (marker found)")
        return True
    print(f"Installing grecy model: {model_name}...")
    if model_name == "grc_proiel_trf":
        wheel_filename = "grc_proiel_trf-3.7.5-py3-none-any.whl"
    elif model_name in ["grc_perseus_trf", "grc_proiel_lg", "grc_perseus_lg",
                        "grc_proiel_sm", "grc_perseus_sm", "grc_ner_trf"]:
        wheel_filename = f"{model_name}-0.0.0-py3-none-any.whl"
    else:
        print(f"βœ— Unknown grecy model: {model_name}")
        return False
    install_url = f"https://github.com/CrispStrobe/greCy/releases/download/v1.0-models/{wheel_filename}"
    cmd = [sys.executable, "-m", "pip", "install", install_url, "--no-deps"]
    print(f"Running: {' '.join(cmd)}")
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, check=True, timeout=900)
        if result.stdout: print("STDOUT:", result.stdout)
        if result.stderr: print("STDERR:", result.stderr)
        print(f"βœ“ Successfully installed {model_name} from GitHub")
        marker_file.touch()
        return True
    except subprocess.CalledProcessError as e:
        print(f"βœ— Installation subprocess FAILED with code {e.returncode}")
        print("STDOUT:", e.stdout)
        print("STDERR:", e.stderr)
        return False
    except Exception as e:
        print(f"βœ— Installation exception: {e}")
        traceback.print_exc()
        return False
    
def spacy_load_spacy_model(model_name: str) -> Optional[spacy.Language]:
    """Load or install a standard spaCy model."""
    try:
        return spacy.load(model_name)
    except OSError:
        print(f"Installing {model_name}...")
        try:
            subprocess.check_call([sys.executable, "-m", "spacy", "download", model_name])
            return spacy.load(model_name)
        except Exception as e:
            print(f"βœ— Failed to install {model_name}: {e}")
            return None

def spacy_load_grecy_model(model_name: str) -> Optional[spacy.Language]:
    """ Load a grecy model, installing from GitHub if needed. """
    if not spacy_install_grecy_model_from_github(model_name):
        print(f"βœ— Cannot load {model_name} because installation failed.")
        return None
    try:
        print("Refreshing importlib to find new package...")
        importlib.invalidate_caches()
        try: importlib.reload(site)
        except Exception: pass
        print(f"Trying: spacy.load('{model_name}')")
        nlp = spacy.load(model_name)
        print(f"βœ“ Successfully loaded {model_name}")
        return nlp
    except Exception as e:
        print(f"βœ— Model {model_name} is installed but FAILED to load.")
        print(f"   Error: {e}")
        traceback.print_exc()
        return None

def spacy_initialize_models():
    """ Pre-load standard models and ensure _trf dependencies are ready. """
    print("\n" + "="*70)
    print("INITIALIZING SPACY MODELS")
    print("="*70 + "\n")
    spacy_install_spacy_transformers_once()
    loaded_count = 0
    spacy_model_count = 0
    for lang_code, (lang_name, model_name, model_type) in SPACY_MODEL_INFO.items():
        if model_type == "spacy":
            spacy_model_count += 1
            print(f"Loading {lang_name} ({model_name})...")
            nlp = spacy_load_spacy_model(model_name)
            SPACY_MODELS[lang_code] = nlp
            if nlp:
                print(f"βœ“ {lang_name} ready\n")
                loaded_count += 1
            else:
                print(f"βœ— {lang_name} FAILED\n")
        else:
            print(f"βœ“ {lang_name} ({model_name}) will be loaded on first use.\n")
            SPACY_MODELS[lang_code] = None
    print(f"Pre-loaded {loaded_count}/{spacy_model_count} standard models.")
    print("="*70 + "\n")


def spacy_get_analysis(ui_lang: str, model_lang_key: str, text: str):
    """Analyze text and return results."""
    ui_config = SPACY_UI_TEXT.get(ui_lang.lower(), SPACY_UI_TEXT["en"])
    error_prefix = ui_config.get("error_message", "Error: ")
    try:
        if not text.strip():
            return ([], [], "<p style='color: orange;'>No text provided.</p>", "<p>No text provided.</p>",
                    gr.Button(value=ui_config.get("button_text", "Analyze"), interactive=True))
        
        nlp = SPACY_MODELS.get(model_lang_key)
        if nlp is None:
            # Try loading one last time
            if model_lang_key in SPACY_MODEL_INFO:
                _, model_name, model_type = SPACY_MODEL_INFO[model_lang_key]
                if model_type == 'grecy': nlp = spacy_load_grecy_model(model_name)
                else: nlp = spacy_load_spacy_model(model_name)
                SPACY_MODELS[model_lang_key] = nlp
        
        if nlp is None:
             return ([], {"error": "Model load failed"}, "Error", "Error", gr.Button(interactive=True))

        doc = nlp(text)
        dataframe_output = []
        json_output = []
        for token in doc:
            lemma_str = token.lemma_
            morph_str = str(token.morph) if token.morph else ''
            dep_str = token.dep_ if doc.has_annotation("DEP") else ''
            tag_str = token.tag_ or ''
            pos_str = token.pos_ or ''
            json_output.append({
                "word": token.text, "lemma": lemma_str, "pos": pos_str,
                "tag": tag_str, "morphology": morph_str, "dependency": dep_str,
                "is_stopword": token.is_stop
            })
            dataframe_output.append([token.text, lemma_str, pos_str, tag_str, morph_str, dep_str])
        
        html_dep_out = ""
        if "parser" in nlp.pipe_names and doc.is_parsed:
            try:
                options = {"compact": True, "bg": "#ffffff", "color": "#000000", "font": "Source Sans Pro"}
                html_svg = displacy.render(doc, style="dep", jupyter=False, options=options)
                html_dep_out = _html_wrap(html_svg, line_height="2.5")
            except Exception as e:
                html_dep_out = f"<p>Visualization error: {e}</p>"
        
        html_ner_out = ""
        if "ner" in nlp.pipe_names:
            if doc.ents:
                try:
                    html_ner = displacy.render(doc, style="ent", jupyter=False)
                    html_ner_out = _html_wrap(html_ner, line_height="2.5")
                except Exception: html_ner_out = "<p>Error rendering NER</p>"
            else: html_ner_out = "<p>No entities found.</p>"

        return (dataframe_output, json_output, html_dep_out, html_ner_out,
                gr.Button(value=ui_config.get("button_text", "Analyze"), interactive=True))
    except Exception as e:
        traceback.print_exc()
        error_html = f"<div style='color: red;'>{error_prefix} {str(e)}</div>"
        return ([], {"error": str(e)}, error_html, error_html, gr.Button(interactive=True))


def spacy_update_ui(ui_lang: str):
    """Update UI language for the spaCy tab."""
    # Placeholder - actual implementation would update labels
    return [gr.update()] * 14

# ============================================================================
# 4. GRAMMAR CHECKER LOGIC (LanguageTool Only)
# ============================================================================

# --- Globals for LanguageTool ---
LT_TOOL_INSTANCES: Dict[str, Optional[language_tool_python.LanguageTool]] = {}
LT_TOOL_LOCK = threading.Lock()

def lt_get_language_tool(lang: str = 'en') -> Optional[language_tool_python.LanguageTool]:
    """ Thread-safe function to get a LanguageTool instance for a specific language. """
    global LT_TOOL_INSTANCES
    if not LT_AVAILABLE:
        raise ImportError("language-tool-python library is not installed.")

    lang_code = 'en-US' if lang == 'en' else 'de-DE'

    if lang_code in LT_TOOL_INSTANCES:
        return LT_TOOL_INSTANCES[lang_code]

    with LT_TOOL_LOCK:
        if lang_code in LT_TOOL_INSTANCES:
            return LT_TOOL_INSTANCES[lang_code]
        try:
            print(f"Initializing LanguageTool for {lang_code}...")
            tool = language_tool_python.LanguageTool(lang_code)
            _ = tool.check("This is a test.") if lang == 'en' else tool.check("Dies ist ein Test.")
            print(f"LanguageTool ({lang_code}) initialized successfully.")
            LT_TOOL_INSTANCES[lang_code] = tool
            return tool
        except Exception as e:
            print(f"CRITICAL ERROR: Failed to initialize LanguageTool for {lang_code}: {e}")
            return None

def lt_check_grammar(text: str, lang: str = 'en') -> List[Dict[str, Any]]:
    """ Checks text for grammar errors and returns a JSON list. """
    try:
        tool = lt_get_language_tool(lang)
        if tool is None:
            return [{"error": f"LanguageTool service for '{lang}' failed to initialize."}]
        if not text or not text.strip():
            return [{"info": "No text provided to check."}]
        
        matches = tool.check(text)
        if not matches:
            return [{"info": "No errors found!", "status": "perfect"}]
            
        errors_list = []
        for match in matches:
            error = {
                "message": match.message,
                "rule_id": match.ruleId,
                "category": getattr(match.category, 'name', match.category),
                "incorrect_text": text[match.offset : match.offset + match.errorLength],
                "replacements": match.replacements,
                "offset": match.offset,
                "length": match.errorLength,
            }
            errors_list.append(error)
        return errors_list
    except Exception as e:
        traceback.print_exc()
        return [{"error": f"An unexpected error occurred: {str(e)}"}]

# ============================================================================
# 5. WORDNET THESAURUS LOGIC (OEWN)
# ============================================================================
# --- Globals & Classes for WordNet ---
@dataclass
class WordNetWorkItem:
    word: str
    lang: str
    response_queue: queue.Queue

class WordNetWorkerState(Enum):
    NOT_STARTED = 1
    INITIALIZING = 2
    READY = 3
    ERROR = 4

wordnet_worker_state = WordNetWorkerState.NOT_STARTED
wordnet_worker_thread = None
wordnet_work_queue = queue.Queue()
wordnet_en_instance = None # For OEWN

# --- Worker Thread Logic (Adapted for OEWN) ---
def wordnet_download_data():
    """Download WordNet data. Called once by worker thread."""
    if not WN_AVAILABLE:
        print("[WordNet Worker] 'wn' library not available. Skipping download.")
        return False
    try:
        print("[WordNet Worker] Downloading WordNet data...")
        # --- OEWN REPLACEMENT ---
        try:
            wn.download('oewn') # Open English WordNet
            print("βœ“ Downloaded OEWN")
        except Exception as e:
            print(f"[WordNet Worker] Note: oewn download: {e}")
        # --- END REPLACEMENT ---
        try:
            wn.download('cili:1.0')
        except Exception as e:
            print(f"[WordNet Worker] Note: cili download: {e}")
            
        print("[WordNet Worker] βœ“ WordNet data ready")
        return True
    except Exception as e:
        print(f"[WordNet Worker] βœ— Failed to download WordNet data: {e}")
        return False

def wordnet_worker_loop():
    """ Worker thread main loop. """
    global wordnet_worker_state, wordnet_en_instance
    if not WN_AVAILABLE:
        wordnet_worker_state = WordNetWorkerState.ERROR
        return
    try:
        print("[WordNet Worker] Starting worker thread...")
        wordnet_worker_state = WordNetWorkerState.INITIALIZING
        if not wordnet_download_data():
            wordnet_worker_state = WordNetWorkerState.ERROR
            return

        print("[WordNet Worker] Creating WordNet instances...")
        # --- OEWN REPLACEMENT ---
        wordnet_en_instance = wn.Wordnet('oewn')
        print("βœ“ Loaded OEWN (English)")
        # --- END REPLACEMENT ---
        
        wordnet_worker_state = WordNetWorkerState.READY
        print("[WordNet Worker] Ready to process requests")
        
        while True:
            try:
                item: WordNetWorkItem = wordnet_work_queue.get(timeout=1)
                try:
                    if item.lang == 'en':
                        wn_instance = wordnet_en_instance
                    else:
                        # This app is English-only, but we keep the structure
                        raise Exception(f"Language '{item.lang}' not supported by this worker.")

                    if wn_instance is None:
                        raise Exception(f"WordNet instance for '{item.lang}' is not loaded.")
                        
                    result = wordnet_process_word_lookup(item.word, wn_instance)
                    item.response_queue.put(("success", result))
                except Exception as e:
                    traceback.print_exc()
                    item.response_queue.put(("error", str(e)))
                finally:
                    wordnet_work_queue.task_done()
            except queue.Empty:
                continue
    except Exception as e:
        print(f"[WordNet Worker] Fatal error: {e}")
        traceback.print_exc()
        wordnet_worker_state = WordNetWorkerState.ERROR

def wordnet_process_word_lookup(word: str, wn_instance: wn.Wordnet) -> Dict[str, Any]:
    """ Process a single word lookup. Runs in the worker thread. """
    if not word or not word.strip():
        return {"info": "No word provided to check."}
    word = word.strip().lower()
    senses = wn_instance.senses(word)
    if not senses:
        return {"info": f"The word '{word}' was not found in the thesaurus."}
    
    results: Dict[str, Any] = {"input_word": word, "senses": []}
    for sense in senses:
        synset = sense.synset()
        def get_lemmas(synsets, remove_self=False):
            lemmas: Set[str] = set()
            for s in synsets:
                for lemma in s.lemmas():
                    if not (remove_self and lemma == word):
                        lemmas.add(lemma)
            return sorted(list(lemmas))
        
        antonym_words: Set[str] = set()
        try:
            for ant_sense in sense.get_related('antonym'):
                antonym_words.add(ant_sense.word().lemma())
        except Exception:
            pass
            
        sense_info = {
            "pos": synset.pos,
            "definition": synset.definition() or "No definition available.",
            "synonyms": get_lemmas([synset], remove_self=True),
            "antonyms": sorted(list(antonym_words)),
            "hypernyms (is a type of)": get_lemmas(synset.hypernyms()),
            "hyponyms (examples are)": get_lemmas(synset.hyponyms()),
            "holonyms (is part of)": get_lemmas(synset.holonyms()),
            "meronyms (has parts)": get_lemmas(synset.meronyms()),
        }
        results["senses"].append(sense_info)
    return results
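
# Illustrative result shape for wordnet_process_word_lookup("dog", wn_instance):
#   {"input_word": "dog",
#    "senses": [{"pos": "n", "definition": "...",
#                "synonyms": [...], "antonyms": [],
#                "hypernyms (is a type of)": [...],
#                "hyponyms (examples are)": [...], ...}]}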

def wordnet_start_worker():
    """Start the worker thread if not already started."""
    global wordnet_worker_thread, wordnet_worker_state
    if wordnet_worker_state != WordNetWorkerState.NOT_STARTED:
        return
    if not WN_AVAILABLE:
        wordnet_worker_state = WordNetWorkerState.ERROR
        return
    wordnet_worker_thread = threading.Thread(target=wordnet_worker_loop, daemon=True, name="WordNetWorker")
    wordnet_worker_thread.start()
    timeout = 30
    for _ in range(timeout * 10):
        if wordnet_worker_state in (WordNetWorkerState.READY, WordNetWorkerState.ERROR):
            break
        threading.Event().wait(0.1)
    if wordnet_worker_state != WordNetWorkerState.READY:
        raise Exception("OdeNet Worker failed to initialize")

# --- Public API (Adapted) ---
def wordnet_get_thesaurus_info(word: str, lang: str = 'en') -> Dict[str, Any]:
    """ Public API: Finds thesaurus info. Thread-safe. """
    if not WN_AVAILABLE:
        return {"error": "WordNet (wn) library is not available."}
    if wordnet_worker_state != WordNetWorkerState.READY:
        return {"error": "WordNet service is not ready. Please try again."}
    try:
        response_queue = queue.Queue()
        item = WordNetWorkItem(word=word, lang=lang, response_queue=response_queue) # <-- Pass lang
        wordnet_work_queue.put(item)
        try:
            status, result = response_queue.get(timeout=30)
            if status == "success":
                return result
            else:
                return {"error": f"Lookup failed: {result}"}
        except queue.Empty:
            return {"error": "Request timed out"}
    except Exception as e:
        traceback.print_exc()
        return {"error": f"An unexpected error occurred: {str(e)}"}

# ============================================================================
# 6. PATTERN INFLECTION LOGIC (pattern.en)
# ============================================================================

def pattern_is_good_analysis(analysis, analysis_type):
    """Check if an analysis has meaningful data."""
    if not analysis: return False
    
    if analysis_type == 'noun':
        return 'plural' in analysis and analysis['plural'] != analysis['singular']
        
    elif analysis_type == 'verb':
        present = analysis.get('conjugation', {}).get('Present', {})
        if len(present) < 3: return False
        return True
        
    elif analysis_type == 'adjective':
        return 'comparative' in analysis or 'superlative' in analysis
        
    return False

def pattern_analyze_as_noun_en(word: str, hint_lemma: str = None) -> Dict[str, Any]:
    """Comprehensive noun inflection analysis for English."""
    log(f"   Analyzing as noun (hint_lemma={hint_lemma})")
    if not PATTERN_EN_AVAILABLE: return {'error': 'pattern.en not available'}
    
    # 1. Determine Singular/Plural base
    # If the word is already plural, singularize it to get the lemma
    try:
        singular_form = singularize(word)
        plural_form = pluralize(singular_form)
    except Exception as e:
        return {'error': f'Inflection failed: {e}'}

    # 2. Get Indefinite Article (a/an)
    try:
        art = article(singular_form)
        art_str = f"{art} {singular_form}"
    except Exception:
        art_str = f"a/an {singular_form}"

    analysis = {
        "base_form": singular_form,
        "singular": singular_form,
        "plural": plural_form,
        "article": art_str,
        "declension": {
            "Singular": {"form": singular_form},
            "Plural": {"form": plural_form}
        },
        "gender": "N/A" # English nouns strictly do not have grammatical gender
    }
    return analysis
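
# Output sketch for pattern_analyze_as_noun_en("mice"), assuming pattern.en
# singularizes/pluralizes as documented:
#   {"base_form": "mouse", "singular": "mouse", "plural": "mice",
#    "article": "a mouse",
#    "declension": {"Singular": {"form": "mouse"}, "Plural": {"form": "mice"}},
#    "gender": "N/A"}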

def pattern_analyze_as_verb_en(word: str, hint_lemma: str = None) -> Dict[str, Any]:
    """Comprehensive verb conjugation analysis for English."""
    log(f"   Analyzing as verb (hint_lemma={hint_lemma})")
    if not PATTERN_EN_AVAILABLE: return {'error': 'pattern.en not available'}

    # 1. Get Lemma
    try:
        verb_lemma = lemma(word)
    except Exception:
        verb_lemma = word
        
    analysis = {"infinitive": verb_lemma}

    # 2. Get Lexeme (List of all forms)
    try:
        # lexeme returns: [infinitive, 3sg, present_participle, past, past_participle]
        # e.g., be => ['be', 'is', 'being', 'was', 'been']
        forms = lexeme(verb_lemma)
        analysis["lexeme"] = forms
    except Exception as e:
        log(f"   Failed to get lexeme: {e}")
        analysis["lexeme"] = []

    # 3. Conjugation Table
    analysis["conjugation"] = {}
    try:
        # Present Tense
        analysis["conjugation"]["Present"] = {
            "I (1sg)":       conjugate(verb_lemma, tense=PRESENT, person=1, number=SINGULAR),
            "you (2sg)":     conjugate(verb_lemma, tense=PRESENT, person=2, number=SINGULAR),
            "he/she (3sg)":  conjugate(verb_lemma, tense=PRESENT, person=3, number=SINGULAR),
            "we (1pl)":      conjugate(verb_lemma, tense=PRESENT, person=1, number=PLURAL),
            "you (2pl)":     conjugate(verb_lemma, tense=PRESENT, person=2, number=PLURAL),
            "they (3pl)":    conjugate(verb_lemma, tense=PRESENT, person=3, number=PLURAL),
        }
        
        # Past Tense (Pattern usually handles simple past variations)
        analysis["conjugation"]["Past"] = {
            "I (1sg)":       conjugate(verb_lemma, tense=PAST, person=1, number=SINGULAR),
            "he/she (3sg)":  conjugate(verb_lemma, tense=PAST, person=3, number=SINGULAR),
            "General":       conjugate(verb_lemma, tense=PAST) # For regular verbs where all are same
        }

        # Participles
        analysis["participles"] = {
            "Present Participle (gerund)": conjugate(verb_lemma, tense=PRESENT, aspect="progressive"), # or aspect=PROGRESSIVE
            "Past Participle": conjugate(verb_lemma, tense=PAST, aspect="perfective") # or use PARTICIPLE constant
        }
    except Exception as e:
        log(f"   Failed to conjugate: {e}")
        
    return analysis
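
# Sketch: pattern_analyze_as_verb_en("ran") should lemmatize to "run", so the
# conjugation table is built from the lemma; assuming standard pattern.en
# behaviour, conjugation["Present"]["he/she (3sg)"] == "runs" and
# conjugation["Past"]["General"] == "ran".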
    
def pattern_analyze_as_adjective_en(word: str, hint_lemma: str = None) -> Dict[str, Any]:
    """Comprehensive adjective inflection analysis for English."""
    log(f"   Analyzing as adjective (hint_lemma={hint_lemma})")
    if not PATTERN_EN_AVAILABLE: return {'error': 'pattern.en not available'}
    
    # If the word is comparative/superlative, we would ideally reduce it to the
    # base (predicative) form, but pattern.en has no reliable "un-grade"
    # function, so we treat the input as the base form.
    base = word

    analysis = {}
    analysis["predicative"] = base
    
    try:
        comp = comparative(base)
        sup = superlative(base)
        
        analysis["comparative"] = comp
        analysis["superlative"] = sup
        
        analysis["grading"] = {
            "Positive": base,
            "Comparative": comp,
            "Superlative": sup
        }
    except Exception as e:
        log(f"   Failed to get comparison: {e}")
        analysis["grading"] = {"error": "Could not grade adjective"}
        
    return analysis
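
# Sketch (standard pattern.en grading): pattern_analyze_as_adjective_en("big") ->
#   {"predicative": "big", "comparative": "bigger", "superlative": "biggest",
#    "grading": {"Positive": "big", "Comparative": "bigger", "Superlative": "biggest"}}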

# --- Public API (Adapted) ---
def pattern_get_all_inflections(word: str, lang: str = 'en') -> Dict[str, Any]:
    """
    Generates ALL possible inflections for an English word.
    """
    if lang != 'en' or not PATTERN_EN_AVAILABLE:
        return {"error": "`pattern.en` library not available or lang not 'en'."}
        
    word = word.strip()
    log(f"ANALYZING (EN): {word}")
    
    analyses: Dict[str, Any] = {}
    
    try:
        noun_analysis = pattern_analyze_as_noun_en(word)
        if noun_analysis and not noun_analysis.get("error"):
             analyses["noun"] = noun_analysis
            
        verb_analysis = pattern_analyze_as_verb_en(word)
        if verb_analysis and not verb_analysis.get("error"):
            analyses["verb"] = verb_analysis
            
        adj_analysis = pattern_analyze_as_adjective_en(word)
        if adj_analysis and not adj_analysis.get("error"):
            analyses["adjective"] = adj_analysis

    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

    results: Dict[str, Any] = {
        "input_word": word,
        "analyses": analyses
    }
    if not results["analyses"]:
        results["info"] = "Word could not be analyzed as noun, verb, or adjective."
    return results
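
# Usage sketch: pattern_get_all_inflections("run") returns
#   {"input_word": "run", "analyses": {...}}
# where "analyses" may hold any of the keys "noun", "verb", "adjective";
# if none of the three analyzers succeeds, an "info" message is set instead.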

def word_appears_in_inflections_en(word: str, inflections: Dict[str, Any], pos_type: str) -> bool:
    """
    Check if the input word appears in the English inflection forms.
    """
    word_lower = word.lower()
    actual_forms = set()

    if pos_type == 'noun':
        actual_forms.add(inflections.get('singular', '').lower())
        actual_forms.add(inflections.get('plural', '').lower())
        
    elif pos_type == 'verb':
        conjugation = inflections.get('conjugation', {})
        for tense_data in conjugation.values():
            if isinstance(tense_data, dict):
                # conjugate() can return None for forms it cannot build, so guard.
                actual_forms.update(v.lower() for v in tense_data.values() if v)
        participles = inflections.get('participles', {})
        actual_forms.update(v.lower() for v in participles.values() if v)
        actual_forms.update(f.lower() for f in inflections.get('lexeme', []) if f)
        actual_forms.add(inflections.get('infinitive', '').lower())

    elif pos_type == 'adjective':
        actual_forms.add(inflections.get('predicative', '').lower())
        actual_forms.add(inflections.get('comparative', '').lower())
        actual_forms.add(inflections.get('superlative', '').lower())
    
    elif pos_type == 'adverb':
        return True # Adverbs are non-inflecting, always valid

    if word_lower in actual_forms:
        log(f"   βœ“ Word '{word}' was found in the {pos_type} inflection table.")
        return True
    
    log(f"   βœ— Word '{word}' not found in any {pos_type} inflection forms.")
    return False
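
# Validation sketch: given `inflections = pattern_analyze_as_verb_en("run")`,
#   word_appears_in_inflections_en("ran", inflections, "verb") -> True,
# while a form that never occurs in the table (e.g. "runningest") -> False.
# pos_type == "adverb" always returns True, since adverbs do not inflect.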

# ============================================================================
# 6b. CONCEPTNET & OPENBLP LOGIC
# ============================================================================
def get_conceptnet_client() -> Optional[Client]:
    """ Thread-safe function to get a single instance of the Gradio Client. """
    global CONCEPTNET_CLIENT
    if not GRADIO_CLIENT_AVAILABLE:
        return None
    
    if CONCEPTNET_CLIENT:
        return CONCEPTNET_CLIENT
    
    with CONCEPTNET_CLIENT_LOCK:
        if CONCEPTNET_CLIENT:
            return CONCEPTNET_CLIENT
        try:
            print("Initializing Gradio Client for ConceptNet...")
            client = Client("cstr/conceptnet_normalized")
            print("βœ“ Gradio Client for ConceptNet initialized.")
            CONCEPTNET_CLIENT = client
            return CONCEPTNET_CLIENT
        except Exception as e:
            print(f"βœ— CRITICAL: Failed to initialize ConceptNet Gradio Client: {e}")
            return None

def conceptnet_get_relations(word: str, language: str = 'en') -> Dict[str, Any]:
    """
    Fetches relations from the cstr/conceptnet_normalized Gradio API.
    """
    if not GRADIO_CLIENT_AVAILABLE:
        return {"error": "`gradio_client` library is not installed."}
        
    if not word or not word.strip():
        return {"info": "No word provided."}
        
    word_lower = word.strip().lower()
    cache_key = (word_lower, language)
    
    with CONCEPTNET_LOCK:
        if cache_key in CONCEPTNET_CACHE:
            log(f"ConceptNet: Found '{word_lower}' in cache.")
            return CONCEPTNET_CACHE[cache_key]
            
    log(f"ConceptNet: Fetching '{word_lower}' from Gradio API...")
    
    try:
        client = get_conceptnet_client()
        if not client:
            return {"error": "ConceptNet Gradio Client is not available."}
        
        selected_relations = ["RelatedTo", "IsA", "PartOf", "HasA", "UsedFor", "CapableOf", "AtLocation", "Synonym", "Antonym", "Causes", "HasProperty", "MadeOf", "HasSubevent", "DerivedFrom", "SimilarTo"]
        
        result_markdown = client.predict(
            word=word_lower,
            lang=language,
            selected_relations=selected_relations,
            api_name="/get_semantic_profile"
        )
        
        relations_list = []
        if not isinstance(result_markdown, str):
            raise TypeError(f"ConceptNet API returned type {type(result_markdown)}, expected str.")

        lines = result_markdown.split('\n')
        current_relation = None
        line_pattern = None

        for line in lines:
            line = line.strip()
            if not line: continue
            if line.startswith('## '):
                current_relation = line[3:].strip()
                if current_relation:
                    line_pattern = re.compile(
                        r"-\s*(.+?)\s+(%s)\s+β†’\s+(.+?)\s+\`\[([\d.]+)\]\`" % re.escape(current_relation)
                    )
                continue

            if line.startswith('- ') and current_relation and line_pattern:
                match = line_pattern.search(line)
                if not match: continue
                try:
                    node1 = match.group(1).strip().strip('*')
                    relation = match.group(2)
                    node2 = match.group(3).strip().strip('*')
                    weight = float(match.group(4))
                    
                    other_node, direction = None, None
                    if node1.lower() == word_lower and node2.lower() != word_lower:
                        other_node, direction = node2, "->"
                    elif node2.lower() == word_lower and node1.lower() != word_lower:
                        other_node, direction = node1, "<-"
                    else:
                        continue
                        
                    relations_list.append({
                        "relation": relation, "direction": direction, "other_node": other_node,
                        "other_lang": language, "weight": weight,
                        "surface": f"{node1} {relation} {node2}"
                    })
                except Exception as e:
                    log(f"ConceptNet Parser: Error parsing line '{line}': {e}")
        
        if not relations_list:
            final_result = {"info": f"No valid relations found for '{word_lower}'."}
        else:
            relations_list.sort(key=lambda x: x.get('weight', 0.0), reverse=True)
            final_result = {"relations": relations_list}
            
        with CONCEPTNET_LOCK:
            CONCEPTNET_CACHE[cache_key] = final_result
            
        log(f"ConceptNet: Returning {len(relations_list)} relations for '{word_lower}'")
        return final_result
        
    except Exception as e:
        error_msg = f"ConceptNet Gradio API request failed: {type(e).__name__} - {e}"
        return {"error": error_msg}

# --- OpenBLP Stub ---
def openblp_get_relations(lemma: str) -> List[Dict[str, Any]]:
    """ 
    Stub function to query OpenBLP. 
    Replace this with your actual OpenBLP database/API query.
    """
    # Placeholder logic
    if lemma == "dog":
        return [
            {"relation": "HasProperty", "other_node": "loyal", "weight": 0.9, "source": "openblp"},
            {"relation": "IsA", "other_node": "animal", "weight": 1.0, "source": "openblp"}
        ]
    if lemma == "cat":
        return [
            {"relation": "HasProperty", "other_node": "independent", "weight": 0.8, "source": "openblp"}
        ]
    return [] 

# ============================================================================
# 6c. NEW: HANTA (EN) INITIALIZER & ENGINE
# ============================================================================
def hanta_get_tagger_en() -> Optional[HanoverTagger]:
    """ Thread-safe function to get the ENGLISH HanTa Tagger. """
    global HANTA_TAGGER_EN
    if not HANTA_AVAILABLE:
        raise ImportError("HanTa library is not installed.")
    
    if HANTA_TAGGER_EN:
        return HANTA_TAGGER_EN
    
    with HANTA_TAGGER_LOCK:
        if HANTA_TAGGER_EN:
            return HANTA_TAGGER_EN
        try:
            print("Initializing HanTa Tagger (English)...")
            PACKAGE_DIR = os.path.dirname(HanTa.HanoverTagger.__file__)
            MODEL_PATH = os.path.join(PACKAGE_DIR, 'morphmodel_en.pgz')
            if not os.path.exists(MODEL_PATH):
                 raise FileNotFoundError(f"HanTa English model not found at {MODEL_PATH}")

            tagger = HanoverTagger(MODEL_PATH)
            _ = tagger.analyze("Test") # Warm-up call
            print("βœ“ HanTa Tagger (English) initialized successfully.")
            HANTA_TAGGER_EN = tagger
            return HANTA_TAGGER_EN
        except Exception as e:
            print(f"CRITICAL ERROR: Failed to initialize HanTa (EN) Tagger: {e}")
            return None

def _hanta_pos_to_key(hanta_pos: str) -> Optional[str]:
    """ Maps HanTa's complex POS tags to simple keys. """
    if hanta_pos.startswith('N'): return "noun"
    if hanta_pos.startswith('VV'): return "verb"
    if hanta_pos.startswith('ADJ'): return "adjective"
    if hanta_pos == 'ADV': return "adverb"
    return None
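
# Mapping sketch, derived directly from the prefixes above:
#   _hanta_pos_to_key("NN")    -> "noun"
#   _hanta_pos_to_key("VVFIN") -> "verb"
#   _hanta_pos_to_key("ADJD")  -> "adjective"
#   _hanta_pos_to_key("ADV")   -> "adverb"
#   _hanta_pos_to_key("ART")   -> None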

def _analyze_word_with_hanta_en(word: str, top_n: int) -> Dict[str, Any]:
    """ (FALLBACK ENGINE 1) Analyzes a single word using HanTa (EN). """
    if not HANTA_AVAILABLE: return {}
    print(f"\n[Word Encyclopedia] Running HanTa (EN) fallback for: \"{word}\"")
    final_result = {"input_word": word, "analysis": {}}
    
    try:
        tagger = hanta_get_tagger_en()
        if not tagger: return {}
        
        possible_tags = tagger.tag_word(word.lower())
        possible_tags.extend(tagger.tag_word(word.capitalize()))
        
        processed_lemmas_pos: Set[Tuple[str, str]] = set()

        for hanta_pos, _ in possible_tags:
            pos_key = _hanta_pos_to_key(hanta_pos)
            if not pos_key: continue

            raw_analysis = tagger.analyze(word.lower() if pos_key != 'noun' else word.capitalize())
            lemma = raw_analysis[0] # The lemma
            
            if (lemma, pos_key) in processed_lemmas_pos:
                continue
            processed_lemmas_pos.add((lemma, pos_key))
            log(f"--- Analyzing HanTa (EN) path: lemma='{lemma}', pos='{pos_key}' ---")
            
            pattern_block = {}
            if PATTERN_EN_AVAILABLE:
                if pos_key == "noun": pattern_block = pattern_analyze_as_noun_en(lemma)
                elif pos_key == "verb": pattern_block = pattern_analyze_as_verb_en(lemma)
                elif pos_key == "adjective": pattern_block = pattern_analyze_as_adjective_en(lemma)
                elif pos_key == "adverb": pattern_block = {"base_form": lemma, "info": "Adverbs are non-inflecting."}
            
            semantics_block = _build_semantics_block_for_lemma(lemma, pos_key, top_n, 'en')
            
            pos_entry_report = {
                "hanta_analysis": { 
                    "lemma": lemma,
                    "pos_tag": hanta_pos,
                    "analysis_string": str(raw_analysis),
                    "source": "hanta_en"
                },
                "inflections_pattern": pattern_block,
                "semantics_combined": semantics_block
            }
            
            if word_appears_in_inflections_en(word, pattern_block, pos_key):
                if pos_key not in final_result["analysis"]:
                    final_result["analysis"][pos_key] = []
                final_result["analysis"][pos_key].append(pos_entry_report)
            else:
                log(f"   βœ— HanTa (EN) path {lemma}/{pos_key} REJECTED by validation.")

        if not final_result["analysis"]: return {}
        final_result["info"] = "Analysis from HanTa (EN) (Fallback 1)."
        return final_result

    except Exception as e:
        log(f"HanTa (EN) Engine FAILED: {e}")
        traceback.print_exc()
        return {}

# ============================================================================
# 6d. WIKTIONARY DATABASE LOGIC (EN)
# ============================================================================
def wiktionary_download_db() -> bool:
    """ Downloads the compressed English Wiktionary DB and extracts it. """
    global WIKTIONARY_AVAILABLE
    
    # Check if the extracted DB already exists
    if os.path.exists(WIKTIONARY_DB_PATH):
        print(f"βœ“ English Wiktionary DB '{WIKTIONARY_DB_PATH}' already exists.")
        WIKTIONARY_AVAILABLE = True
        return True

    print(f"English Wiktionary DB not found. Downloading '{WIKTIONARY_REMOTE_FILE}' from '{WIKTIONARY_REPO_ID}'...")
    try:
        # 1. Download the .gz file
        downloaded_gz_path = hf_hub_download(
            repo_id=WIKTIONARY_REPO_ID,
            filename=WIKTIONARY_REMOTE_FILE,
            repo_type="dataset",
            local_dir="."
            # `local_dir_use_symlinks` is deprecated in huggingface_hub and intentionally omitted.
        )
        
        # 2. Decompress the .gz file to the .db file
        print(f"Downloading complete. Extracting '{downloaded_gz_path}' to '{WIKTIONARY_DB_PATH}'...")
        with gzip.open(downloaded_gz_path, 'rb') as f_in:
            with open(WIKTIONARY_DB_PATH, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
        
        # Optional: Cleanup the .gz file to save space
        try:
            os.remove(downloaded_gz_path)
        except OSError:
            pass

        print(f"βœ“ English Wiktionary DB downloaded and extracted successfully.")
        WIKTIONARY_AVAILABLE = True
        return True
    except Exception as e:
        print(f"βœ— CRITICAL: Failed to download/extract English Wiktionary DB: {e}")
        # traceback.print_exc() # Uncomment for deep debugging
        return False

def wiktionary_run_startup_diagnostics():
    """ Runs critical checks on the DB structure and content at startup. """
    print("\n" + "="*50)
    print("RUNNING WIKTIONARY DB DIAGNOSTICS")
    print("="*50)
    
    conn = wiktionary_get_connection()
    if not conn:
        print("βœ— Diagnostics aborted: No DB connection.")
        return

    try:
        # 1. Check Table Structure
        print("[1] Checking Tables...")
        tables = conn.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall()
        table_names = [t['name'] for t in tables]
        print(f"    Found tables: {table_names}")
        
        if 'entries' not in table_names:
            print("CRITICAL ERROR: 'entries' table missing!")
            return

        # 2. Check Language Encoding (a common cause of empty lookup results)
        print("\n[2] Checking Language Format (Top 5)...")
        langs = conn.execute("SELECT lang, COUNT(*) as c FROM entries GROUP BY lang ORDER BY c DESC LIMIT 5").fetchall()
        for row in langs:
            print(f"    - '{row['lang']}': {row['c']} entries")

        # 3. Check Specific 'Missing' Words
        test_words = ["ready", "runner", "run", "house"]
        print(f"\n[3] Probing missing words: {test_words}")
        for word in test_words:
            # Check exact match raw
            raw = conn.execute("SELECT count(*) as c FROM entries WHERE word = ?", (word,)).fetchone()
            print(f"    - '{word}' (Raw check): Found {raw['c']} rows")
            
            if raw['c'] == 0:
                # Check case insensitive
                nocase = conn.execute("SELECT word FROM entries WHERE word LIKE ? LIMIT 1", (word,)).fetchone()
                if nocase:
                    print(f"      ! WARNING: '{word}' not found exactly, but found '{nocase['word']}' (Case mismatch?)")
                else:
                    print(f"      ! CRITICAL: '{word}' does not exist in DB at all.")

    except Exception as e:
        print(f"βœ— Diagnostics crashed: {e}")
        traceback.print_exc()
    print("="*50 + "\n")

def wiktionary_get_connection() -> Optional[sqlite3.Connection]:
    """ Thread-safe function to get a single, read-only SQLite connection. """
    global WIKTIONARY_CONN, WIKTIONARY_AVAILABLE
    if not WIKTIONARY_AVAILABLE:
        log("Wiktionary DB is not available, cannot create connection.")
        return None

    if WIKTIONARY_CONN:
        return WIKTIONARY_CONN

    with WIKTIONARY_CONN_LOCK:
        if WIKTIONARY_CONN:
            return WIKTIONARY_CONN
        
        if not os.path.exists(WIKTIONARY_DB_PATH):
            log("Wiktionary DB file missing, connection failed.")
            WIKTIONARY_AVAILABLE = False
            return None
            
        try:
            log("Creating new read-only connection to Wiktionary DB...")
            db_uri = f"file:{WIKTIONARY_DB_PATH}?mode=ro"
            conn = sqlite3.connect(db_uri, uri=True, check_same_thread=False)
            conn.row_factory = sqlite3.Row 
            _ = conn.execute("SELECT name FROM sqlite_master WHERE type='table' LIMIT 1").fetchone()
            print("βœ“ Wiktionary DB connection successful.")
            WIKTIONARY_CONN = conn
            return WIKTIONARY_CONN
        except Exception as e:
            print(f"βœ— CRITICAL: Failed to connect to Wiktionary DB: {e}")
            WIKTIONARY_AVAILABLE = False
            return None

def _wiktionary_map_pos_key(wikt_pos: Optional[str]) -> str:
    """Maps Wiktionary POS tags to our internal keys."""
    if not wikt_pos: return "unknown"
    if wikt_pos == "noun": return "noun"
    if wikt_pos == "verb": return "verb"
    if wikt_pos == "adj": return "adjective"
    if wikt_pos == "adv": return "adverb"
    return wikt_pos

def _wiktionary_build_report_for_entry(entry_id: int, conn: sqlite3.Connection) -> Dict[str, Any]:
    """ (REVISED FOR FULL DB V3) Fetches ALL data for a single entry_id. """
    report = {}
    
    entry_data = conn.execute(
        "SELECT word, title, redirect, pos, pos_title, lang, etymology_text FROM entries WHERE id = ?", (entry_id,)
    ).fetchone()
    if not entry_data:
        return {"error": "Entry ID not found"}
    report.update(dict(entry_data))
    report["entry_id"] = entry_id
    report["lemma"] = entry_data["word"]

    senses_q = conn.execute(
        """
        SELECT 
            s.id as sense_id, s.sense_index,
            (SELECT GROUP_CONCAT(g.gloss_text, '; ') FROM glosses g WHERE g.sense_id = s.id) as glosses,
            (SELECT GROUP_CONCAT(t.tag, ', ') FROM sense_tags st JOIN tags t ON st.tag_id = t.id WHERE st.sense_id = s.id) as tags,
            (SELECT GROUP_CONCAT(top.topic, ', ') FROM sense_topics stop JOIN topics top ON stop.topic_id = top.id WHERE stop.sense_id = s.id) as topics
        FROM senses s
        WHERE s.entry_id = ? ORDER BY s.id
        """, (entry_id,)
    ).fetchall()
    
    senses_list = []
    for sense_row in senses_q:
        sense_dict = dict(sense_row)
        sense_id = sense_dict["sense_id"]
        examples_q = conn.execute(
            "SELECT text, ref FROM examples WHERE sense_id = ?", (sense_id,)
        ).fetchall()
        sense_dict["examples"] = [dict(ex) for ex in examples_q]
        senses_list.append(sense_dict)
    report["senses"] = senses_list

    forms_q = conn.execute(
        """
        SELECT f.form_text, f.sense_index,
            (SELECT GROUP_CONCAT(t.tag, ', ') FROM form_tags ft JOIN tags t ON ft.tag_id = t.id WHERE ft.form_id = f.id) as tags
        FROM forms f
        WHERE f.entry_id = ? GROUP BY f.id ORDER BY f.id
        """, (entry_id,)
    ).fetchall()
    report["forms"] = [dict(f) for f in forms_q]

    return report
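
# Report shape sketch (keys mirror the SELECTs above):
#   {"entry_id": ..., "word": ..., "lemma": ..., "title": ..., "redirect": ...,
#    "pos": ..., "pos_title": ..., "lang": ..., "etymology_text": ...,
#    "senses": [{"sense_id": ..., "sense_index": ..., "glosses": "...; ...",
#                "tags": "...", "topics": "...", "examples": [{"text": ..., "ref": ...}]}],
#    "forms": [{"form_text": ..., "sense_index": ..., "tags": "..."}]}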

def _wiktionary_find_all_entries(word: str, conn: sqlite3.Connection) -> List[Dict[str, Any]]:
    """ Finds entries with verbose debugging if lookup fails. """
    log(f"Wiktionary (EN): Querying for '{word}'...")
    found_entry_ids: Set[int] = set()
    
    lang_query = 'English'
    form_titles = ("Inflected form", "verb form", "noun form", "adjective form", "Comparative", "Superlative") 

    # Search variants: input, lowercase, title-case
    search_variants = list(set([word, word.lower(), word.title()]))
    placeholders = ', '.join('?' for _ in search_variants)

    # 1. Search Lemmatized Entries
    sql_lemma = f"SELECT id, pos_title, word FROM entries WHERE word IN ({placeholders}) AND lang = ?"
    params_lemma = list(search_variants) + [lang_query]
    
    lemma_q = conn.execute(sql_lemma, params_lemma).fetchall()
    
    parent_lemmas_to_find: Set[str] = set()
    
    for row in lemma_q:
        entry_id = row["id"]
        pos_title = row["pos_title"] or ""
        found_entry_ids.add(entry_id)
        
        # Check for parent lemma in "form_of" field
        if any(ft in pos_title for ft in form_titles):
            form_of_q = conn.execute("SELECT form_of FROM senses WHERE entry_id = ?", (entry_id,)).fetchall()
            for form_row in form_of_q:
                form_of_json = form_row["form_of"]
                if not form_of_json: continue
                try:
                    form_of_data = json.loads(form_of_json)
                    if isinstance(form_of_data, list) and form_of_data:
                        parent = form_of_data[0].get("word")
                        if parent: parent_lemmas_to_find.add(parent)
                except json.JSONDecodeError: pass

    # 2. Search Inflected Forms
    sql_form = f"""
        SELECT DISTINCT e.id
        FROM forms f
        JOIN entries e ON f.entry_id = e.id
        WHERE f.form_text IN ({placeholders}) AND e.lang = ?
        AND f.id NOT IN (
            SELECT ft.form_id FROM form_tags ft JOIN tags t ON ft.tag_id = t.id
            WHERE t.tag IN ('variant', 'auxiliary')
        )
    """
    params_form = list(search_variants) + [lang_query]
    form_q = conn.execute(sql_form, params_form).fetchall()
    for row in form_q:
        found_entry_ids.add(row["id"])
        
    # 3. Add Parent Lemmas
    if parent_lemmas_to_find:
        for lemma_word in parent_lemmas_to_find:
            parent_id_q = conn.execute("SELECT id FROM entries WHERE word = ? AND lang = ?", (lemma_word, lang_query)).fetchall()
            for row in parent_id_q: found_entry_ids.add(row["id"])
    
    # =========================================================
    # πŸ” VERBOSE DEBUG DETECTIVE (Triggered on Failure)
    # =========================================================
    if not found_entry_ids:
        log(f"⚠ [DEBUG-VERBOSE] Zero results for '{word}'. Running diagnostics...")
        try:
            # Check 1: Does it exist in ANY language?
            any_lang = conn.execute(
                f"SELECT lang, word FROM entries WHERE word IN ({placeholders}) LIMIT 5", 
                list(search_variants)
            ).fetchall()
            if any_lang:
                found_langs = [f"{r['word']} ({r['lang']})" for r in any_lang]
                log(f"   -> FOUND in other languages/cases: {found_langs}")
                log(f"   -> CONCLUSION: Language filter '{lang_query}' might be too strict.")
            else:
                log(f"   -> NOT FOUND in 'entries' table (any language).")
                
                # Check 2: Does it exist as a form?
                any_form = conn.execute(
                    f"SELECT form_text FROM forms WHERE form_text IN ({placeholders}) LIMIT 1", 
                    list(search_variants)
                ).fetchone()
                if any_form:
                    log(f"   -> FOUND in 'forms' table as '{any_form['form_text']}'! (But failed to link to an English entry)")
                else:
                    log(f"   -> NOT FOUND in 'forms' table either.")

                # Check 3: Is it there but with whitespace issues?
                fuzzy = conn.execute("SELECT word FROM entries WHERE word LIKE ? LIMIT 1", (f"%{word}%",)).fetchone()
                if fuzzy:
                    log(f"   -> PARTIAL MATCH found: '{fuzzy['word']}'. (Check for whitespace/punctuation?)")
                else:
                    log(f"   -> COMPLETELY MISSING from DB.")
        except Exception as e:
            log(f"   -> Detective crashed: {e}")

    log(f"Wiktionary: Found {len(found_entry_ids)} unique matching entries.")
    
    all_reports = []
    for entry_id in found_entry_ids:
        try:
            report = _wiktionary_build_report_for_entry(entry_id, conn)
            all_reports.append(report)
        except Exception as e:
            log(f"Wiktionary: Failed to build report for entry {entry_id}: {e}")
            
    return all_reports

def _wiktionary_format_semantics_block(wikt_report: Dict[str, Any], pattern_block: Dict[str, Any], top_n: int) -> Dict[str, Any]:
    """ Combines English Wiktionary senses with OEWN/ConceptNet.
    (`pattern_block` is currently unused; kept for signature parity with the other engines.) """
    pos_key = _wiktionary_map_pos_key(wikt_report.get("pos"))
    semantic_lemma = wikt_report.get("lemma") 
    
    wiktionary_senses = []
    for sense in wikt_report.get("senses", []):
        wiktionary_senses.append({
            "definition": sense.get("glosses"),
            "source": "wiktionary"
        })

    oewn_senses = []
    if WN_AVAILABLE:
        try:
            senses_by_pos = _get_wordnet_senses_by_pos(semantic_lemma, 'en') 
            oewn_senses_raw = senses_by_pos.get(pos_key, [])
            if oewn_senses_raw and "info" not in oewn_senses_raw[0]:
                oewn_senses = oewn_senses_raw
        except Exception as e:
            log(f"[DEBUG] OEWN lookup failed for {semantic_lemma} ({pos_key}): {e}")

    conceptnet_relations = []
    if GRADIO_CLIENT_AVAILABLE:  # ConceptNet now goes through the Gradio client
        try:
            conceptnet_result = conceptnet_get_relations(semantic_lemma, language='en')
            conceptnet_relations = conceptnet_result.get("relations", [])
        except Exception: pass
        
    if top_n > 0:
        wiktionary_senses = wiktionary_senses[:top_n]
        oewn_senses = oewn_senses[:top_n]
        conceptnet_relations.sort(key=lambda x: x.get('weight', 0.0), reverse=True)
        conceptnet_relations = conceptnet_relations[:top_n]
    
    return {
        "lemma": semantic_lemma,
        "wiktionary_senses": wiktionary_senses,
        "odenet_senses": oewn_senses, # Key name preserved
        "conceptnet_relations": conceptnet_relations,
        "wiktionary_synonyms": wikt_report.get("synonyms", []),
        "wiktionary_antonyms": wikt_report.get("antonyms", [])
    }


# ============================================================================
# 6e. SHARED SEMANTIC HELPER (OEWN + OpenBLP)
# ============================================================================

def _get_wordnet_senses_by_pos(word: str, lang: str = 'en') -> Dict[str, List[Dict[str, Any]]]:
    """ (Helper) Fetches WordNet (OEWN) senses for a word and groups them by POS. """
    senses_by_pos: Dict[str, List[Dict]] = {
        "noun": [], "verb": [], "adjective": [], "adverb": []
    }
    if not WN_AVAILABLE:
        return {"noun": [{"info": "WordNet unavailable"}], "verb": [{"info": "WordNet unavailable"}],
                "adjective": [{"info": "WordNet unavailable"}], "adverb": [{"info": "WordNet unavailable"}]}
        
    try:
        all_senses = wordnet_get_thesaurus_info(word, lang).get("senses", [])
        for sense in all_senses:
            if "error" in sense: continue
            pos_tag = sense.get("pos")
            
            if pos_tag == 'n': senses_by_pos["noun"].append(sense)
            elif pos_tag == 'v': senses_by_pos["verb"].append(sense)
            elif pos_tag == 'a' or pos_tag == 's': senses_by_pos["adjective"].append(sense)
            elif pos_tag == 'r': senses_by_pos["adverb"].append(sense)
    except Exception as e:
        log(f"WordNet helper check failed for '{word}': {e}")
        
    return senses_by_pos
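
# Grouping sketch: OEWN POS tags map onto the four buckets as
#   'n' -> "noun", 'v' -> "verb", 'a'/'s' -> "adjective", 'r' -> "adverb";
# e.g. for "fast" the adjective bucket collects every sense tagged 'a' or 's'.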

def _build_semantics_block_for_lemma(lemma: str, pos_key: str, top_n: int, lang: str = 'en') -> Dict[str, Any]:
    """ (REUSABLE HELPER) Fetches OEWN, ConceptNet, and OpenBLP data. """
    log(f"[DEBUG] Building semantics for lemma='{lemma}', pos='{pos_key}', lang='{lang}'")
    
    oewn_senses = []
    if WN_AVAILABLE:
        try:
            senses_by_pos = _get_wordnet_senses_by_pos(lemma, lang)
            oewn_senses_raw = senses_by_pos.get(pos_key, [])
            if oewn_senses_raw and "info" not in oewn_senses_raw[0]:
                oewn_senses = oewn_senses_raw
        except Exception as e:
            log(f"[DEBUG] OEWN lookup failed for {lemma} ({pos_key}): {e}")

    conceptnet_relations = []
    if GRADIO_CLIENT_AVAILABLE:  # ConceptNet now goes through the Gradio client
        try:
            conceptnet_result = conceptnet_get_relations(lemma, language=lang)
            conceptnet_relations = conceptnet_result.get("relations", [])
        except Exception as e:
            conceptnet_relations = [{"error": str(e)}]
            
    openblp_relations = []
    try:
        openblp_relations = openblp_get_relations(lemma)
    except Exception as e:
        openblp_relations = [{"error": f"OpenBLP stub failed: {e}"}]
            
    if top_n > 0:
        oewn_senses = oewn_senses[:top_n]
        conceptnet_relations.sort(key=lambda x: x.get('weight', 0.0), reverse=True)
        conceptnet_relations = conceptnet_relations[:top_n]
        openblp_relations.sort(key=lambda x: x.get('weight', 0.0), reverse=True)
        openblp_relations = openblp_relations[:top_n]

    return {
        "lemma": lemma,
        "wiktionary_senses": [],
        "odenet_senses": oewn_senses,
        "conceptnet_relations": conceptnet_relations,
        "openblp_relations": openblp_relations,
        "wiktionary_synonyms": [],
        "wiktionary_antonyms": []
    }


# ============================================================================
# 6f. PRIMARY & FALLBACK ENGINES
# ============================================================================

# --- PRIMARY ENGINE: WIKTIONARY (EN) ---
def _analyze_word_with_wiktionary(word: str, top_n: int) -> Dict[str, Any]:
    """ (PRIMARY ENGINE) Analyzes an English word using the Wiktionary DB. """
    final_result: Dict[str, Any] = {"input_word": word, "analysis": {}}
    
    conn = wiktionary_get_connection()
    if not conn: return {}
        
    spacy_pos_hint, spacy_lemma_hint = None, None
    try:
        nlp_en = SPACY_MODELS.get("en")
        if nlp_en:
            doc = nlp_en(word)
            token = doc[0]
            spacy_pos_hint = token.pos_.lower()
            spacy_lemma_hint = token.lemma_
    except Exception: pass

    try:
        wiktionary_reports = _wiktionary_find_all_entries(word, conn)
    except Exception as e:
        log(f"[DEBUG] Wiktionary (EN) query failed: {e}")
        return {} 
    if not wiktionary_reports: return {}

    def get_priority_score(report):
        wikt_pos = _wiktionary_map_pos_key(report.get("pos"))
        wikt_lemma = report.get("lemma")
        if spacy_pos_hint and wikt_pos == spacy_pos_hint:
            if spacy_lemma_hint and wikt_lemma == spacy_lemma_hint: return 1
            return 2
        if wikt_lemma and wikt_lemma.lower() == word.lower(): return 3
        return 4
    
    wiktionary_reports.sort(key=get_priority_score)
    
    word_lower = word.lower()
    for wikt_report in wiktionary_reports:
        # Safe extraction: pos/pos_title can be NULL in the DB, so coerce to strings.
        pos_key = _wiktionary_map_pos_key(wikt_report.get("pos"))
        lemma = wikt_report.get("lemma") or word
        pos_title = wikt_report.get("pos_title") or ""
        
        inflections_wikt_block = {
            "base_form": lemma,
            "forms_list": wikt_report.get("forms", []),
            "source": "wiktionary"
        }
        
        pattern_block = {}
        if PATTERN_EN_AVAILABLE:
            try:
                use_word = word if "form" in pos_title.lower() else lemma
                if pos_key == "noun": pattern_block = pattern_analyze_as_noun_en(use_word)
                elif pos_key == "verb": pattern_block = pattern_analyze_as_verb_en(use_word)
                elif pos_key == "adjective": pattern_block = pattern_analyze_as_adjective_en(use_word)
                elif pos_key == "adverb": pattern_block = {"base_form": lemma, "info": "Adverbs are non-inflecting."}
            except Exception as e:
                pattern_block = {"error": f"Pattern.en analysis failed: {e}"}

        semantics_block = _wiktionary_format_semantics_block(wikt_report, pattern_block, top_n)
        
        pos_entry_report = {
            "inflections_wiktionary": inflections_wikt_block,
            "inflections_pattern": pattern_block,
            "semantics_combined": semantics_block,
            "wiktionary_metadata": {
                 "pos_title": pos_title,
                 "etymology": wikt_report.get("etymology_text") or "",
                 "pronunciation": wikt_report.get("sounds") or "",
            }
        }
        
        is_valid = False
        is_inflected_entry = any(ft in pos_title for ft in ["form", "Comparative", "Superlative"])

        if lemma.lower() == word_lower: is_valid = True
        
        if not is_valid and not is_inflected_entry:
            for form_entry in inflections_wikt_block.get("forms_list", []):
                form_text = form_entry.get("form_text", "").strip()
                if form_text.lower() == word_lower:
                    is_valid = True
                    break
        
        if is_valid:
            if pos_key not in final_result["analysis"]:
                final_result["analysis"][pos_key] = []
            final_result["analysis"][pos_key].append(pos_entry_report)
    
    final_result["info"] = f"Analysis from Wiktionary. Found {len(wiktionary_reports)} raw entries."
    return final_result
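
# Ranking sketch: if spaCy hints (pos="verb", lemma="run") for input "running",
# get_priority_score gives 1 to a Wiktionary verb entry with lemma "run",
# 2 to any other verb entry, 3 to an entry whose lemma spells the input exactly,
# and 4 otherwise, so the closest-matching report is emitted first.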


# --- FALLBACK 2: STANZA ---
def stanza_get_pipeline_en() -> Optional[stanza.Pipeline]:
    """ Thread-safe function to get the ENGLISH Stanza Pipeline. """
    global STANZA_PIPELINE_EN
    if not STANZA_AVAILABLE:
        raise ImportError("Stanza library is not installed.")
    
    if STANZA_PIPELINE_EN:
        return STANZA_PIPELINE_EN
    
    with STANZA_PIPELINE_LOCK:
        if STANZA_PIPELINE_EN:
            return STANZA_PIPELINE_EN
        try:
            print("Initializing Stanza Pipeline (English)...")
            stanza.download('en', verbose=False, processors='tokenize,pos,lemma')
            pipeline = stanza.Pipeline('en', verbose=False, processors='tokenize,pos,lemma')
            print("βœ“ Stanza Pipeline (English) initialized successfully.")
            STANZA_PIPELINE_EN = pipeline
            return STANZA_PIPELINE_EN
        except Exception as e:
            print(f"CRITICAL ERROR: Failed to initialize Stanza (EN) Pipeline: {e}")
            return None

def _analyze_word_with_stanza(word: str, top_n: int) -> Dict[str, Any]:
    """ (FALLBACK ENGINE 2) Analyzes with Stanza. Must match JSON. """
    if not STANZA_AVAILABLE: return {}
    print(f"\n[Word Encyclopedia] Running Stanza fallback for: \"{word}\"")
    final_result = {"input_word": word, "analysis": {}}
    try:
        pipeline = stanza_get_pipeline_en()
        if not pipeline: return {}
        doc = pipeline(word)
        
        processed_lemmas_pos: Set[Tuple[str, str]] = set()

        for sent in doc.sentences:
            for token in sent.words:
                pos_map = {"NOUN": "noun", "VERB": "verb", "ADJ": "adjective", "ADV": "adverb"}
                if token.pos not in pos_map: continue
                
                pos_key = pos_map[token.pos]
                lemma = token.lemma
                if not lemma: continue

                if (lemma, pos_key) in processed_lemmas_pos: continue
                processed_lemmas_pos.add((lemma, pos_key))
                log(f"--- Analyzing Stanza path: lemma='{lemma}', pos='{pos_key}' ---")

                pattern_block = {}
                if PATTERN_EN_AVAILABLE:
                    if pos_key == "noun": pattern_block = pattern_analyze_as_noun_en(lemma)
                    elif pos_key == "verb": pattern_block = pattern_analyze_as_verb_en(lemma)
                    elif pos_key == "adjective": pattern_block = pattern_analyze_as_adjective_en(lemma)
                    elif pos_key == "adverb": pattern_block = {"base_form": lemma, "info": "Adverbs are non-inflecting."}
                
                semantics_block = _build_semantics_block_for_lemma(lemma, pos_key, top_n, 'en')

                pos_entry_report = {
                    "stanza_analysis": { # <-- New key for this engine
                        "lemma": lemma,
                        "pos_UPOS": token.pos,
                        "pos_XPOS": token.xpos,
                        "morphology": str(token.feats) if token.feats else "",
                        "source": "stanza"
                    },
                    "inflections_pattern": pattern_block,
                    "semantics_combined": semantics_block
                }
                
                if word_appears_in_inflections_en(word, pattern_block, pos_key):
                    if pos_key not in final_result["analysis"]:
                        final_result["analysis"][pos_key] = []
                    final_result["analysis"][pos_key].append(pos_entry_report)
                else:
                    log(f"   βœ— Stanza path {lemma}/{pos_key} REJECTED by validation.")

        if not final_result["analysis"]: return {}
        final_result["info"] = "Analysis from Stanza (Fallback 2)."
        return final_result
    except Exception as e:
        log(f"Stanza Engine FAILED: {e}")
        traceback.print_exc()
        return {}

# --- FALLBACK 3: NLTK ---
def nltk_get_lemmatizer() -> Optional[WordNetLemmatizer]:
    """ Thread-safe function to get the NLTK Lemmatizer. """
    global NLTK_LEMMATIZER
    if not NLTK_AVAILABLE:
        return None # Don't raise error, just return None to trigger graceful fallback
    
    if NLTK_LEMMATIZER:
        return NLTK_LEMMATIZER
        
    with NLTK_LEMMATIZER_LOCK:
        if NLTK_LEMMATIZER:
            return NLTK_LEMMATIZER
        try:
            # Ensure data is present one last time before init
            try:
                nltk.data.find('corpora/wordnet.zip')
            except LookupError:
                nltk.download('wordnet', quiet=True)
                
            NLTK_LEMMATIZER = WordNetLemmatizer()
            # Warm up
            _ = NLTK_LEMMATIZER.lemmatize("cats") 
            print("βœ“ NLTK Lemmatizer initialized.")
            return NLTK_LEMMATIZER
        except Exception as e:
            print(f"βœ— NLTK Init Failed: {e}")
            return None

def _nltk_get_wordnet_pos(treebank_tag):
    """Converts NLTK's Treebank POS tag to a WordNet tag."""
    if treebank_tag.startswith('J'): return nltk_wn.ADJ
    if treebank_tag.startswith('V'): return nltk_wn.VERB
    if treebank_tag.startswith('N'): return nltk_wn.NOUN
    if treebank_tag.startswith('R'): return nltk_wn.ADV
    return None
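
# Mapping sketch (Penn Treebank prefixes -> NLTK WordNet constants):
#   "JJ"/"JJR"/"JJS" -> ADJ, "VB"/"VBD"/"VBG" -> VERB,
#   "NN"/"NNS"/"NNP" -> NOUN, "RB"/"RBR" -> ADV, anything else -> None.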

def _analyze_word_with_nltk(word: str, top_n: int) -> Dict[str, Any]:
    """ (FALLBACK ENGINE 3) Analyzes with NLTK. """
    if not NLTK_AVAILABLE: return {}
    print(f"\n[Word Encyclopedia] Running NLTK fallback for: \"{word}\"")
    final_result = {"input_word": word, "analysis": {}}
        
    try:
        lemmatizer = nltk_get_lemmatizer()
        if not lemmatizer: return {}
        
        # NLTK's POS tagger needs a list
        # This specific call was crashing because 'averaged_perceptron_tagger_eng' was missing
        try:
            tag = nltk.pos_tag([word])[0][1]
        except LookupError:
            # Last ditch attempt to download if it was missing
            nltk.download('averaged_perceptron_tagger_eng', quiet=True)
            tag = nltk.pos_tag([word])[0][1]

        wn_pos = _nltk_get_wordnet_pos(tag)
        
        if not wn_pos: 
            log(f"   βœ— NLTK path REJECTED: Unknown POS tag '{tag}'.")
            return {}
        
        lemma = lemmatizer.lemmatize(word, wn_pos)
        
        # Map NLTK WN constants to strings
        pos_map_rev = {nltk_wn.NOUN: "noun", nltk_wn.VERB: "verb", nltk_wn.ADJ: "adjective", nltk_wn.ADV: "adverb"}
        pos_key = pos_map_rev.get(wn_pos)
        if not pos_key: return {}

        log(f"--- Analyzing NLTK path: lemma='{lemma}', pos='{pos_key}' ---")
        
        pattern_block = {}
        if PATTERN_EN_AVAILABLE:
            # Use the fixed pattern functions from previous step
            if pos_key == "noun": pattern_block = pattern_analyze_as_noun_en(lemma)
            elif pos_key == "verb": pattern_block = pattern_analyze_as_verb_en(lemma)
            elif pos_key == "adjective": pattern_block = pattern_analyze_as_adjective_en(lemma)
            elif pos_key == "adverb": pattern_block = {"base_form": lemma, "info": "Adverbs are non-inflecting."}

        semantics_block = _build_semantics_block_for_lemma(lemma, pos_key, top_n, 'en')
        
        pos_entry_report = {
            "nltk_analysis": {
                "lemma": lemma,
                "pos_Treebank": tag,
                "pos_WordNet": wn_pos,
                "source": "nltk"
            },
            "inflections_pattern": pattern_block,
            "semantics_combined": semantics_block
        }
        
        if word_appears_in_inflections_en(word, pattern_block, pos_key):
            if pos_key not in final_result["analysis"]:
                final_result["analysis"][pos_key] = []
            final_result["analysis"][pos_key].append(pos_entry_report)
        else:
            log(f"   βœ— NLTK path {lemma}/{pos_key} REJECTED by validation.")

        if not final_result["analysis"]: return {}
        final_result["info"] = "Analysis from NLTK (Fallback 3)."
        return final_result
    except Exception as e:
        log(f"NLTK Engine FAILED: {e}")
        # traceback.print_exc() # Optional: Uncomment for deep debugging
        return {}

# --- FALLBACK 4: TEXTBLOB ---
def _analyze_word_with_textblob(word: str, top_n: int) -> Dict[str, Any]:
    """ (FALLBACK ENGINE 4) Analyzes with TextBlob. """
    if not TEXTBLOB_AVAILABLE: return {}
    print(f"\n[Word Encyclopedia] Running TextBlob fallback for: \"{word}\"")
    final_result = {"input_word": word, "analysis": {}}
    
    def get_wordnet_pos_tb(treebank_tag):
        if treebank_tag.startswith('J'): return 'a'
        if treebank_tag.startswith('V'): return 'v'
        if treebank_tag.startswith('N'): return 'n'
        if treebank_tag.startswith('R'): return 'r'
        return None
        
    try:
        try:
            blob = TextBlob(word)
            # This access triggers the tokenizer
            tags = blob.tags 
        except Exception as e:  # LookupError is a subclass of Exception
            if "punkt" in str(e):
                 print("Attempting to download missing TextBlob/NLTK data...")
                 import nltk
                 nltk.download('punkt_tab', quiet=True)
                 nltk.download('punkt', quiet=True)
                 blob = TextBlob(word)
                 tags = blob.tags
            else:
                raise e

        if not tags: return {}
        
        processed_lemmas_pos: Set[Tuple[str, str]] = set()

        for tb_word, tag in tags:
            tb_pos = get_wordnet_pos_tb(tag)
            if not tb_pos: continue
            
            lemma = tb_word.lemmatize(tb_pos)
            pos_map = {'n': "noun", 'v': "verb", 'a': "adjective", 'r': "adverb"}
            pos_key = pos_map.get(tb_pos)
            if not pos_key: continue

            if (lemma, pos_key) in processed_lemmas_pos: continue
            processed_lemmas_pos.add((lemma, pos_key))
            log(f"--- Analyzing TextBlob path: lemma='{lemma}', pos='{pos_key}' ---")
            
            pattern_block = {}
            if PATTERN_EN_AVAILABLE:
                if pos_key == "noun": pattern_block = pattern_analyze_as_noun_en(lemma)
                elif pos_key == "verb": pattern_block = pattern_analyze_as_verb_en(lemma)
                elif pos_key == "adjective": pattern_block = pattern_analyze_as_adjective_en(lemma)
                elif pos_key == "adverb": pattern_block = {"base_form": lemma, "info": "Adverbs are non-inflecting."}

            semantics_block = _build_semantics_block_for_lemma(lemma, pos_key, top_n, 'en')
            
            pos_entry_report = {
                "textblob_analysis": {
                    "lemma": lemma,
                    "pos_Treebank": tag,
                    "source": "textblob"
                },
                "inflections_pattern": pattern_block,
                "semantics_combined": semantics_block
            }

            if word_appears_in_inflections_en(word, pattern_block, pos_key):
                if pos_key not in final_result["analysis"]:
                    final_result["analysis"][pos_key] = []
                final_result["analysis"][pos_key].append(pos_entry_report)
            else:
                log(f"   βœ— TextBlob path {lemma}/{pos_key} REJECTED by validation.")

        if not final_result["analysis"]: return {}
        final_result["info"] = "Analysis from TextBlob (Fallback 4)."
        return final_result
    except Exception as e:
        log(f"TextBlob Engine FAILED: {e}")
        return {}


# ============================================================================
# 7. CONSOLIDATED ANALYZER LOGIC
# ============================================================================

# --- 7a. Comprehensive (Contextual) Analyzer ---
def comprehensive_english_analysis(text: str, top_n_value: Optional[float] = 0) -> Dict[str, Any]:
    """
    (CONTEXTUAL) Combines NLP tools for a deep analysis of English text.
    """
    try:
        if not text or not text.strip():
            return {"info": "Please enter text to analyze."}
        top_n = int(top_n_value) if top_n_value is not None else 0
        
        print(f"\n[Comprehensive Analysis (EN)] Starting analysis for: \"{text}\"")
        results: Dict[str, Any] = {"input_text": text}
        nlp_en = None
        context_doc = None

        # --- 1. LanguageTool Grammar Check (default) ---
        print("[Comprehensive Analysis (EN)] Running LanguageTool...")
        if LT_AVAILABLE:
            try:
                results["grammar_check"] = lt_check_grammar(text, 'en')
            except Exception as e:
                results["grammar_check"] = {"error": f"LanguageTool failed: {e}"}
        else:
            results["grammar_check"] = {"error": "LanguageTool not available."}

        # --- 2. spaCy Morpho-Syntactic Backbone ---
        print("[Comprehensive Analysis (EN)] Running spaCy...")
        spacy_json_output = []
        try:
            _, spacy_json, _, _, _ = spacy_get_analysis("en", "en", text) 
            if isinstance(spacy_json, list):
                spacy_json_output = spacy_json
                results["spacy_analysis"] = spacy_json_output
                nlp_en = SPACY_MODELS.get("en")
                if nlp_en:
                    context_doc = nlp_en(text)
                    if not context_doc.has_vector or context_doc.vector_norm == 0:
                        context_doc = None
            else:
                 results["spacy_analysis"] = spacy_json
        except Exception as e:
            results["spacy_analysis"] = {"error": f"spaCy analysis failed: {e}"}

        # --- 2b. Heuristic SVA check (English) ---
        try:
            if isinstance(results.get("grammar_check"), list) and any(d.get("status") == "perfect" for d in results["grammar_check"]):
                subj_num, verb_num, verb_token, subj_token = None, None, None, None
                for tok in spacy_json_output:
                    if tok.get("dependency") == "nsubj":
                        m = tok.get("morphology","")
                        if "Number=Sing" in m: subj_num, subj_token = "Sing", tok
                    spacy_pos_up = (tok.get("pos") or "").upper()
                    if (spacy_pos_up in {"VERB", "AUX"}) and ("VerbForm=Fin" in tok.get("morphology","")):
                        verb_token = tok
                        m = tok.get("morphology","")
                        if "Number=Plur" in m: verb_num = "Plur"
                
                if subj_num == "Sing" and verb_num == "Plur":
                    # ... (Simplified SVA logic for English) ...
                    sva = { "message": "Possible Subject-Verb Agreement Error: Singular subject with plural verb.", "rule_id": "HEURISTIC_SVA_EN", "category": "Grammar", "incorrect_text": f"{verb_token.get('word')}" if verb_token else "", "replacements": [] }
                    results["grammar_check"] = [sva]
        except Exception as e:
            print(f"SVA Heuristic failed: {e}")

        # --- 3. Lemma-by-Lemma Deep Dive ---
        print("[Comprehensive Analysis (EN)] Running Lemma Deep Dive...")
        FUNCTION_POS = {"DET","ADP","AUX","PUNCT","SCONJ","CCONJ","PART","PRON","NUM","SYM","X", "SPACE"}
        lemma_deep_dive: Dict[str, Any] = {}
        processed_lemmas: Set[str] = set()

        if not spacy_json_output:
            print("[Comprehensive Analysis (EN)] No spaCy tokens to analyze.")
        else:
            for token in spacy_json_output:
                lemma = token.get("lemma")
                pos = (token.get("pos") or "").upper()
                
                if not lemma or lemma == "--" or pos in FUNCTION_POS or lemma in processed_lemmas:
                    continue
                processed_lemmas.add(lemma)
                print(f"[Deep Dive (EN)] Analyzing lemma: '{lemma}'")
                
                lemma_report: Dict[str, Any] = {}
                inflection_analysis = {}
                semantic_analysis = {}
                
                try:
                    # --- Call our NEW English dispatcher ---
                    encyclopedia_data = analyze_word_encyclopedia(lemma, 0, "wiktionary", 'en')
                    word_analysis = encyclopedia_data.get("analysis", {})
                    
                    for pos_key, entry_list in word_analysis.items():
                        if not entry_list: continue
                        data = entry_list[0] # Use first, best analysis
                        
                        inflection_analysis[f"{pos_key}_wiktionary"] = data.get("inflections_wiktionary")
                        inflection_analysis[f"{pos_key}_pattern"] = data.get("inflections_pattern")
                        
                        all_senses_for_pos = []
                        semantics_block = data.get("semantics_combined", {})
                        
                        # Add Wiktionary senses
                        wikt_senses = semantics_block.get("wiktionary_senses", [])
                        for s in wikt_senses:
                            s["source"] = "wiktionary"
                            all_senses_for_pos.append(s)

                        # Add Open English WordNet (OEWN) senses.
                        # NOTE: the key is still named "odenet_senses" (inherited from
                        # the German version of this app); the data stored here is OEWN.
                        wordnet_senses = semantics_block.get("odenet_senses", [])
                        for s in wordnet_senses:
                            s["source"] = "oewn"
                            all_senses_for_pos.append(s)

                        semantic_analysis[f"{pos_key}_senses"] = all_senses_for_pos
                        
                        # Add ConceptNet
                        if "conceptnet_relations" not in semantic_analysis:
                            semantic_analysis["conceptnet_relations"] = []
                        semantic_analysis["conceptnet_relations"].extend(
                            semantics_block.get("conceptnet_relations", [])
                        )
                        # Add OpenBLP
                        if "openblp_relations" not in semantic_analysis:
                            semantic_analysis["openblp_relations"] = []
                        semantic_analysis["openblp_relations"].extend(
                            semantics_block.get("openblp_relations", [])
                        )


                    lemma_report["inflection_analysis"] = inflection_analysis
                    
                except Exception as e:
                    lemma_report["inflection_analysis"] = {"error": f"Analyzer failed: {e}"}


                # --- 3b. Contextual Re-ranking ---
                # Score each sense/relation against the full input sentence and keep
                # the top_n best matches. (Identical to the German version; it just
                # needs the `nlp_en` model.)
                if nlp_en and context_doc:
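                    # Doc.similarity() is cosine similarity over averaged token vectors,
                    # so these scores are only meaningful when the loaded pipeline ships
                    # real word vectors (e.g., en_core_web_md / en_core_web_lg).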
                    # Rank Senses (Wiktionary + OEWN)
                    for key in semantic_analysis:
                        if key.endswith("_senses"):
                            ranked_senses = []
                            for sense in semantic_analysis[key]:
                                if "error" in sense: continue
                                definition = sense.get("definition", "")
                                relevance = 0.0
                                if definition:
                                    try:
                                        def_doc = nlp_en(definition)
                                        if def_doc.has_vector and def_doc.vector_norm > 0:
                                            relevance = context_doc.similarity(def_doc)
                                    except Exception: relevance = 0.0
                                sense["relevance_score"] = float(relevance)
                                ranked_senses.append(sense)
                            
                            ranked_senses.sort(key=lambda x: x.get('relevance_score', 0.0), reverse=True)
                            if top_n > 0:
                                ranked_senses = ranked_senses[:top_n]
                            semantic_analysis[key] = ranked_senses

                    # Rank Relations (ConceptNet, OpenBLP)
                    for key in ["conceptnet_relations", "openblp_relations"]:
                        if key in semantic_analysis:
                            ranked_relations = []
                            for rel in semantic_analysis[key]:
                                if "error" in rel: continue
                                text_to_score = rel.get('surface') or rel.get('other_node', '')
                                relevance = 0.0
                                if text_to_score:
                                    try:
                                        rel_doc = nlp_en(text_to_score)
                                        if rel_doc.has_vector and rel_doc.vector_norm > 0:
                                            relevance = context_doc.similarity(rel_doc)
                                    except Exception: relevance = 0.0
                                rel["relevance_score"] = float(relevance)
                                ranked_relations.append(rel)
                            
                            ranked_relations.sort(key=lambda x: x.get('relevance_score', 0.0), reverse=True)
                            if top_n > 0:
                                ranked_relations = ranked_relations[:top_n]
                            semantic_analysis[key] = ranked_relations

                lemma_report["semantic_analysis"] = semantic_analysis
                lemma_deep_dive[lemma] = lemma_report
                
        results["lemma_deep_dive"] = lemma_deep_dive
        print("[Comprehensive Analysis (EN)] Analysis complete.")
        return results
    
    except Exception as e:
        print(f"[Comprehensive Analysis (EN)] FATAL ERROR: {e}")
        return {
            "error": f"Analysis failed: {str(e)}",
            "traceback": traceback.format_exc(),
        }

# --- 7b. Word Encyclopedia (Non-Contextual) Analyzer ---
def analyze_word_encyclopedia(word: str, top_n_value: Optional[float] = 0, engine_choice: str = "wiktionary", lang: str = 'en') -> Dict[str, Any]:
    """
    (PUBLIC DISPATCHER EN) Analyzes a single English word.
    Chain: Wiktionary -> HanTa -> Stanza -> NLTK -> TextBlob
    """
    if lang != 'en': return {"error": "This is the English app."}
    if not word or not word.strip(): return {"info": "Please enter a word."}
    
    word = word.strip()
    top_n = int(top_n_value) if top_n_value is not None else 0
    result = {}
    info_log = [] 
    
    # Define the full chain of engines to try
    engine_functions = {
        "wiktionary": _analyze_word_with_wiktionary,
        "hanta": _analyze_word_with_hanta_en,
        "stanza": _analyze_word_with_stanza,
        "nltk": _analyze_word_with_nltk,
        "textblob": _analyze_word_with_textblob
    }
    
    # Start the chain based on user's choice
    start_engines = list(engine_functions.keys())
    if engine_choice in start_engines:
        start_index = start_engines.index(engine_choice)
        start_engines = start_engines[start_index:]
    else:
        start_engines = list(engine_functions.keys()) # Default to full chain
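
    # Example: engine_choice="stanza" runs the sub-chain stanza -> nltk -> textblob;
    # engines earlier in the chain are skipped entirely.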

    try:
        for engine_name in start_engines:
            log(f"[DEBUG] EN Dispatcher: Trying Engine '{engine_name}' for '{word}'...")
            if not engine_functions[engine_name]:
                info_log.append(f"{engine_name} is not available.")
                continue
                
            engine_func = engine_functions[engine_name]
            result = engine_func(word, top_n)
            
            if result and result.get("analysis"):
                # Success!
                if info_log:
                    result["info"] = f"{result.get('info', '')} (Fallbacks: {' '.join(info_log)})"
                return result
                
            info_log.append(f"{engine_name} found no results.")
            log(f"[DEBUG] EN Dispatcher: Engine '{engine_name}' found no results. Falling back...")

    except Exception as e:
        log(f"--- Dispatcher FAILED for engine {engine_choice}: {e} ---")
        traceback.print_exc()
        return { "error": f"An engine failed during analysis.", "traceback": traceback.format_exc() }

    # --- No engines found anything ---
    return {
        "input_word": word,
        "info": f"No analysis found. All engines failed. ({' '.join(info_log)})"
    }
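
# Result shape returned by the dispatcher (a sketch, inferred from the consumers
# below -- the field names mirror how _format_word_analysis_html reads them; the
# values shown are purely illustrative):
# {
#   "input_word": "run",
#   "analysis": {
#     "verb": [{
#       "inflections_wiktionary": {"base_form": "run", "forms_list": [...]},
#       "inflections_pattern":    {"infinitive": "run", "conjugation": {...}},
#       "semantics_combined":     {"wiktionary_senses": [...],
#                                  "odenet_senses": [...],
#                                  "conceptnet_relations": [...]}
#     }]
#   },
#   "info": "optional fallback notes"
# }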


# ============================================================================
# 7.5 VISUALIZATION & HTML HELPERS (NEW)
# ============================================================================

HTML_CSS = """
<style>
    /* Card Container - High Contrast */
    .ling-card { 
        font-family: 'Segoe UI', Roboto, Helvetica, Arial, sans-serif; 
        border: 1px solid #d1d5db; /* Darker border */
        border-radius: 8px; 
        padding: 20px; 
        margin-bottom: 20px; 
        background: #ffffff; 
        box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06); 
    }

    /* Header Section */
    .ling-header { 
        display: flex; 
        align-items: center; 
        margin-bottom: 15px; 
        border-bottom: 2px solid #e5e7eb; 
        padding-bottom: 10px; 
    }
    .ling-lemma { 
        font-size: 1.8em; 
        font-weight: 800; 
        color: #111827; /* Almost Black */
        margin-right: 12px; 
    }
    .ling-pos { 
        font-size: 0.85em; 
        text-transform: uppercase; 
        font-weight: 700; 
        padding: 4px 10px; 
        border-radius: 6px; 
        color: #fff; 
        letter-spacing: 0.05em;
    }
    
    /* POS Colors */
    .pos-noun { background-color: #2563eb; }      /* Blue */
    .pos-verb { background-color: #059669; }      /* Green */
    .pos-adj { background-color: #d97706; }       /* Amber */
    .pos-adv { background-color: #7c3aed; }       /* Purple */
    .pos-name { background-color: #db2777; }      /* Pink (Proper Noun) */
    .pos-other { background-color: #4b5563; }     /* Gray */

    /* Section Headers */
    .ling-section { margin-top: 15px; }
    .ling-subtitle { 
        font-size: 0.85em; 
        font-weight: 700; 
        color: #374151; /* Dark Gray */
        text-transform: uppercase; 
        margin-bottom: 8px; 
        border-left: 4px solid #3b82f6;
        padding-left: 8px;
    }
    
    /* Tables */
    .inflection-table { width: 100%; font-size: 0.95em; border-collapse: collapse; margin-bottom: 10px; }
    .inflection-table td { padding: 6px 10px; border-bottom: 1px solid #e5e7eb; color: #1f2937; }
    .inflection-label { color: #6b7280; font-weight: 600; width: 35%; background: #f9fafb; }
    
    /* Senses */
    .sense-item { margin-bottom: 8px; line-height: 1.5; font-size: 1em; color: #1f2937; }
    .source-badge { 
        display: inline-block; font-size: 0.75em; font-weight: bold; padding: 2px 6px; 
        border-radius: 4px; border: 1px solid; margin-right: 8px; vertical-align: middle; 
    }
    .src-wikt { background: #fff1f2; color: #9f1239; border-color: #fda4af; }
    .src-oewn { background: #eff6ff; color: #1e40af; border-color: #93c5fd; }
    
    /* Relations Chips */
    .rel-chip { 
        display: inline-block; 
        background: #f3f4f6; 
        color: #1f2937; /* Force Dark Text */
        padding: 4px 10px; 
        border-radius: 15px; 
        font-size: 0.9em; 
        margin: 3px; 
        border: 1px solid #d1d5db; 
        font-weight: 500;
    }
    .rel-type { color: #6b7280; font-size: 0.8em; margin-right: 4px; font-weight: 700; text-transform: lowercase;}
    
    /* Collapsible */
    .kg-details > summary { 
        cursor: pointer; color: #2563eb; font-size: 0.9em; font-weight: 600; 
        margin-top: 10px; padding: 6px; border-radius: 4px; width: fit-content;
    }
    .kg-details > summary:hover { text-decoration: underline; background: #eff6ff; }
    .kg-content { margin-top: 10px; padding: 10px; background: #f9fafb; border-radius: 8px; border: 1px solid #e5e7eb; }
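
    /* Alert banners used by _format_comprehensive_html. These classes were
       referenced but previously undefined; the colors below are illustrative
       choices, not original styling. */
    .grammar-alert { padding: 10px 14px; border-radius: 8px; margin-bottom: 15px; font-size: 0.95em; }
    .alert-green { background: #ecfdf5; color: #065f46; border: 1px solid #6ee7b7; }
    .alert-red { background: #fef2f2; color: #991b1b; border: 1px solid #fca5a5; }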
</style>
"""

def _format_word_analysis_html(data: Dict[str, Any]) -> str:
    """ Generates HTML for a single word analysis result. """
    if not data or "analysis" not in data:
        info = (data or {}).get("info", "")  # Guard: data may be None here
        return f"{HTML_CSS}<div class='ling-card'>No analysis data available. {info}</div>"
    
    html = HTML_CSS
    analysis = data["analysis"]
    
    # Iterate over POS
    for pos_key, entries in analysis.items():
        if not entries: continue
        entry = entries[0] # Take best candidate
        
        # --- POS Display Logic ---
        # Map internal keys to nice display names and CSS classes
        display_pos = pos_key.upper()
        css_class = "pos-other"
        
        if pos_key == 'noun': css_class = "pos-noun"
        elif pos_key == 'verb': css_class = "pos-verb"
        elif pos_key == 'adj' or pos_key == 'adjective': 
            css_class = "pos-adj"
            display_pos = "ADJECTIVE"
        elif pos_key == 'adv' or pos_key == 'adverb': 
            css_class = "pos-adv"
            display_pos = "ADVERB"
        elif pos_key == 'name': 
            css_class = "pos-name"
            display_pos = "PROPER NOUN"
        
        # Data Extraction
        inf_wikt = entry.get("inflections_wiktionary") or {}
        inf_pat = entry.get("inflections_pattern") or {}
        sem_comb = entry.get("semantics_combined") or {}
        
        lemma = inf_wikt.get("base_form") or \
                inf_pat.get("base_form") or \
                sem_comb.get("lemma") or \
                data.get("input_word") or "?"

        # --- CARD START ---
        html += f"""
        <div class="ling-card">
            <div class="ling-header">
                <span class="ling-lemma">{lemma}</span>
                <span class="ling-pos {css_class}">{display_pos}</span>
            </div>
        """
        
        # --- Inflections Section ---
        html += "<div class='ling-section'><div class='ling-subtitle'>Morphology & Inflections</div>"
        html += "<table class='inflection-table'>"
        
        # We check Pattern data first. If it's empty, we show '-' or rely on Wiktionary forms.
        has_pattern_data = bool(inf_pat) and "error" not in inf_pat
        
        if pos_key == 'noun':
            html += f"<tr><td class='inflection-label'>Singular</td><td>{inf_pat.get('singular', lemma if not has_pattern_data else '-')}</td></tr>"
            html += f"<tr><td class='inflection-label'>Plural</td><td>{inf_pat.get('plural', '-')}</td></tr>"
            if has_pattern_data:
                html += f"<tr><td class='inflection-label'>Context</td><td>{inf_pat.get('article', '-')}</td></tr>"
        
        elif pos_key == 'verb':
            cj = inf_pat.get('conjugation') or {}
            pres = cj.get('Present') or {}
            past = cj.get('Past') or {}
            parts = inf_pat.get('participles') or {}
            html += f"<tr><td class='inflection-label'>Infinitive</td><td>{inf_pat.get('infinitive', lemma)}</td></tr>"
            html += f"<tr><td class='inflection-label'>3rd Person (He/She)</td><td>{pres.get('he/she (3sg)', '-')}</td></tr>"
            html += f"<tr><td class='inflection-label'>Past Simple</td><td>{past.get('General', '-')}</td></tr>"
            html += f"<tr><td class='inflection-label'>Participle (Ing)</td><td>{parts.get('Present Participle (gerund)', '-')}</td></tr>"
            html += f"<tr><td class='inflection-label'>Participle (Past)</td><td>{parts.get('Past Participle', '-')}</td></tr>"

        elif pos_key in ['adjective', 'adj']:
            gr = inf_pat.get('grading') or {}
            html += f"<tr><td class='inflection-label'>Positive</td><td>{gr.get('Positive', lemma)}</td></tr>"
            html += f"<tr><td class='inflection-label'>Comparative</td><td>{gr.get('Comparative', '-')}</td></tr>"
            html += f"<tr><td class='inflection-label'>Superlative</td><td>{gr.get('Superlative', '-')}</td></tr>"

        # Wiktionary Forms (The "Other Forms" box)
        forms_list = inf_wikt.get("forms_list") or []
        if forms_list:
            # Extract text carefully
            forms_str_list = []
            for f in forms_list[:8]: # Show up to 8 forms
                txt = f.get('form_text')
                tags = f.get('tags')
                if txt:
                    # Append the tag if available, e.g., "houses (plural)"
                    display_txt = f"{txt} <small style='color:#6b7280'>({tags})</small>" if tags else txt
                    forms_str_list.append(display_txt)
            
            if forms_str_list:
                html += f"<tr><td class='inflection-label'>Forms (DB)</td><td>{', '.join(forms_str_list)}</td></tr>"
        
        html += "</table></div>"
        
        # --- Semantics Section ---
        html += "<div class='ling-section'><div class='ling-subtitle'>Definitions & Senses</div>"
        
        wikt_senses = sem_comb.get("wiktionary_senses") or []
        oewn_senses = sem_comb.get("odenet_senses") or []
        
        if not wikt_senses and not oewn_senses:
             html += "<div class='sense-item'><i>No definitions found.</i></div>"

        for s in wikt_senses[:3]: 
            gloss_raw = s.get("definition") or "" 
            gloss = str(gloss_raw).replace(";", "<br>") # Ensure string
            if gloss:
                html += f"<div class='sense-item'><span class='source-badge src-wikt'>Wikt</span> {gloss}</div>"
            
        for s in oewn_senses[:3]:
            defi = s.get("definition") or ""
            if defi:
                html += f"<div class='sense-item'><span class='source-badge src-oewn'>OEWN</span> {defi}</div>"
            
        html += "</div>"
        
        # --- Relations Section ---
        rels = sem_comb.get("conceptnet_relations") or []
        if rels:
            html += "<div class='ling-section'><div class='ling-subtitle'>Knowledge Graph</div>"
            
            top_n = 5
            visible_rels = rels[:top_n]
            hidden_rels = rels[top_n:]
            
            def render_rel(r):
                # Robust extraction
                rel_name = r.get("relation", "Rel")
                # Prefer other_node, fall back to parsing surface, fall back to '?'
                target = r.get("other_node") or "?"
                # Clean up surface text if needed
                if target == "?" and "surface" in r:
                     parts = str(r["surface"]).split()
                     if len(parts) > 2: target = parts[-1] 
                
                return f"<span class='rel-chip'><span class='rel-type'>{rel_name}:</span> {target}</span>"

            html += "<div>"
            for r in visible_rels:
                html += render_rel(r)
            html += "</div>"

            if hidden_rels:
                html += f"""
                <details class='kg-details'>
                    <summary>Show {len(hidden_rels)} more relations</summary>
                    <div class='kg-content'>
                """
                for r in hidden_rels:
                    html += render_rel(r)
                html += "</div></details>"
            
            html += "</div>"
            
        html += "</div>" # End Card

    return html

def _format_comprehensive_html(data: Dict[str, Any]) -> str:
    """ Generates HTML for the comprehensive sentence analysis. """
    if "error" in data:
        return f"<div style='color:red'>{data['error']}</div>"
        
    html = HTML_CSS
    
    # 1. Grammar Check Banner
    gc = data.get("grammar_check", [])
    if isinstance(gc, list) and len(gc) == 1 and gc[0].get("status") == "perfect":
        html += "<div class='grammar-alert alert-green'><strong>βœ“ Grammar Check Passed:</strong> No obvious errors detected.</div>"
    elif isinstance(gc, list) and gc:
        html += "<div class='grammar-alert alert-red'><strong>⚠ Grammar Issues Detected:</strong><br>"
        for err in gc:
            msg = err.get("message", "Error")
            bad = err.get("incorrect_text", "")
            html += f"β€’ {msg} (in: '<em>{bad}</em>')<br>"
        html += "</div>"
        
    # 2. Lemma Deep Dive Accordion
    deep_dive = data.get("lemma_deep_dive", {})
    if not deep_dive:
        html += "<p>No deep analysis available.</p>"
    else:
        html += "<h3>Word-by-Word Analysis</h3>"
        for lemma, details in deep_dive.items():
            # Reshape the deep-dive entry back into the { "analysis": { pos: [entry] } }
            # shape that _format_word_analysis_html expects: the deep dive stores
            # "inflection_analysis" and "semantic_analysis" separately, while the word
            # analyzer groups both by POS entry, so this is a simplified reconstruction.
            
            html += f"<details><summary>{lemma}</summary>"
            
            inflections = details.get("inflection_analysis", {})
            semantics = details.get("semantic_analysis", {})
            
            # Recover the POS keys from the inflection key names
            # (e.g., "noun_wiktionary" -> "noun"), skipping the "error" sentinel.
            all_keys = {k.split('_')[0] for k in inflections if k != "error"}
            
            reconstructed_data = {"analysis": {}}
            
            for pos in all_keys:
                entry = {
                    "inflections_wiktionary": inflections.get(f"{pos}_wiktionary"),
                    "inflections_pattern": inflections.get(f"{pos}_pattern"),
                    "semantics_combined": {
                        "lemma": lemma,
                        "wiktionary_senses": [s for s in semantics.get(f"{pos}_senses", []) if s.get('source') == 'wiktionary'],
                        "odenet_senses": [s for s in semantics.get(f"{pos}_senses", []) if s.get('source') == 'oewn'],
                        "conceptnet_relations": semantics.get("conceptnet_relations", [])
                    }
                }
                reconstructed_data["analysis"][pos] = [entry]
                
            html += _format_word_analysis_html(reconstructed_data)
            html += "</details>"
            
    return html

# ============================================================================
# 8. GRADIO UI CREATION (Adapted for English)
# ============================================================================

def create_spacy_tab():
    """Creates the UI for the spaCy tab."""
    config = SPACY_UI_TEXT["en"]
    model_choices = list(SPACY_MODEL_INFO.keys())
    with gr.Row():
        ui_lang_radio = gr.Radio(["DE", "EN", "ES"], label=config["ui_lang_label"], value="EN")
        model_lang_radio = gr.Radio(
            choices=[(SPACY_MODEL_INFO[k][0], k) for k in model_choices],
            label=config["model_lang_label"],
            value="en" # <-- Default to English
        )
    markdown_title = gr.Markdown(config["title"])
    markdown_subtitle = gr.Markdown(config["subtitle"])
    text_input = gr.Textbox(label=config["input_label"], placeholder=config["input_placeholder"], lines=5)
    analyze_button = gr.Button(config["button_text"], variant="primary")
    with gr.Tabs():
        with gr.Tab(config["tab_graphic"]) as tab_graphic:
            html_dep_out = gr.HTML(label=config["html_label"])
        with gr.Tab(config["tab_ner"]) as tab_ner:
            html_ner_out = gr.HTML(label=config["ner_label"])
        with gr.Tab(config["tab_table"]) as tab_table:
            df_out = gr.DataFrame(label=config["table_label"], headers=config["table_headers"], interactive=False)
        with gr.Tab(config["tab_json"]) as tab_json:
            json_out = gr.JSON(label=config["json_label"])
            
    analyze_button.click(fn=spacy_get_analysis,
                         inputs=[ui_lang_radio, model_lang_radio, text_input],
                         outputs=[df_out, json_out, html_dep_out, html_ner_out, analyze_button],
                         api_name="get_morphology")
    
    ui_lang_radio.change(fn=spacy_update_ui,
                         inputs=ui_lang_radio,
                         outputs=[markdown_title, markdown_subtitle, ui_lang_radio, model_lang_radio,
                                  text_input, analyze_button, tab_graphic, tab_table, tab_json, tab_ner,
                                  html_dep_out, df_out, json_out, html_ner_out])

def create_languagetool_tab():
    """Creates the UI for the Grammar Checker tab with LT."""
    gr.Markdown("# πŸ‡¬πŸ‡§ English Grammar & Spelling Checker")
    gr.Markdown("Powered by `LanguageTool`.")
    
    with gr.Row():
        text_input = gr.Textbox(
            label="English Text to Check",
            placeholder="e.g., I seen the man. This is a houze.",
            lines=5,
            scale=3
        )
        
    check_button = gr.Button("Check Text", variant="primary")
    output = gr.JSON(label="Detected Errors (JSON)")

    check_button.click(
        fn=lambda text: lt_check_grammar(text, 'en'),
        inputs=[text_input],
        outputs=[output],
        api_name="check_grammar"
    )
    gr.Examples(
        [["This is a houze."], ["I seen the man."],
         ["The cat sleep on the table."], ["He asks if he can go."]],
        inputs=[text_input], outputs=[output], fn=lambda text: lt_check_grammar(text, 'en'),
        cache_examples=False
    )

def create_wordnet_tab():
    """Creates the UI for the OEWN tab."""
    gr.Markdown("# πŸ‡¬πŸ‡§ English Thesaurus (OEWN) Service")
    gr.Markdown("Powered by `wn` and `Open English WordNet (oewn)`.")
    with gr.Column():
        word_input = gr.Textbox(
            label="English Word",
            placeholder="e.g., house, fast, good, cat"
        )
        check_button = gr.Button("Find Relations", variant="primary")
    output = gr.JSON(label="Thesaurus Information (JSON)")
    
    check_button.click(
        fn=lambda word: wordnet_get_thesaurus_info(word, 'en'),
        inputs=[word_input],
        outputs=[output],
        api_name="get_thesaurus"
    )
    gr.Examples(
        [["dog"], ["good"], ["run"], ["house"], ["fast"]],
        inputs=[word_input], outputs=[output], fn=lambda word: wordnet_get_thesaurus_info(word, 'en'),
        cache_examples=False
    )

def create_pattern_tab():
    """Creates the UI for the Pattern.en tab."""
    gr.Markdown("# πŸ‡¬πŸ‡§ Complete English Word Inflection System")
    gr.Markdown("Powered by `pattern.en`. Generates inflection tables.")
    with gr.Column():
        word_input = gr.Textbox(
            label="English Word",
            placeholder="e.g., house, go, beautiful, better, went, cat"
        )
        generate_button = gr.Button("Generate All Forms", variant="primary")
    output = gr.JSON(label="Complete Inflection Analysis")
    
    generate_button.click(
        fn=lambda word: pattern_get_all_inflections(word, 'en'),
        inputs=[word_input],
        outputs=[output],
        api_name="get_all_inflections"
    )
    gr.Examples(
        [["house"], ["go"], ["beautiful"], ["better"], ["went"], ["cat"], ["run"]],
        inputs=[word_input], outputs=[output], fn=lambda word: pattern_get_all_inflections(word, 'en'),
        cache_examples=False
    )

def create_conceptnet_tab():
    """--- Creates the UI for the ConceptNet tab ---"""
    gr.Markdown("# 🌍 ConceptNet Knowledge Graph (Direct API)")
    gr.Markdown("Fetches semantic relations for a word in any language.")
    with gr.Row():
        word_input = gr.Textbox(label="Word or Phrase", placeholder="e.g., tree, Katze")
        lang_input = gr.Textbox(label="Language Code", value="en") # <-- Default to 'en'
    check_button = gr.Button("Find Relations", variant="primary")
    output = gr.JSON(label="ConceptNet Relations (JSON)")

    check_button.click(
        fn=conceptnet_get_relations,
        inputs=[word_input, lang_input],
        outputs=[output],
        api_name="get_conceptnet"
    )
    gr.Examples(
        [["tree", "en"], ["Baum", "de"], ["cat", "en"], ["gato", "es"]],
        inputs=[word_input, lang_input], outputs=[output], fn=conceptnet_get_relations,
        cache_examples=False
    )

def create_openblp_tab():
    """--- Creates the UI for the OpenBLP tab ---"""
    gr.Markdown("# πŸ”— OpenBLP Knowledge Graph (Stub)")
    gr.Markdown("Stub component to query OpenBLP relations.")
    with gr.Column():
        word_input = gr.Textbox(
            label="English Lemma",
            placeholder="e.g., dog, cat"
        )
        check_button = gr.Button("Find Relations", variant="primary")
    output = gr.JSON(label="OpenBLP Relations (JSON)")
    check_button.click(
        fn=openblp_get_relations,
        inputs=[word_input],
        outputs=[output],
        api_name="get_openblp"
    )
    gr.Examples(
        [["dog"], ["cat"], ["house"]],
        inputs=[word_input], outputs=[output], fn=openblp_get_relations,
        cache_examples=False
    )

def create_combined_tab():
    """Creates the UI for the CONTEXTUAL Comprehensive Analyzer tab."""
    gr.Markdown("# πŸš€ Comprehensive Analyzer (Contextual - EN)")
    gr.Markdown("This tool provides a deep, **lemma-based** analysis *in context* for English.")
    with gr.Column():
        text_input = gr.Textbox(
            label="English Text",
            placeholder="e.g., The quick brown fox jumps over the lazy dog.",
            lines=5
        )
        top_n_number = gr.Number(
            label="Limit Semantic Senses per POS (0 for all)",
            value=0, step=1, minimum=0, interactive=True
        )
        analyze_button = gr.Button("Run Comprehensive Analysis", variant="primary")
    
    status_output = gr.Markdown(value="", visible=True)
    # --- CHANGED: Added HTML output ---
    html_output = gr.HTML(label="Visual Report")
    json_output = gr.JSON(label="Raw JSON Data")
    
    # --- CHANGED: Wrapper to return Status, HTML, and JSON ---
    def run_analysis_with_status_visual(text, top_n):
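        # Generator-style handler: Gradio streams each `yield` to the outputs,
        # so the first yield shows the "Analyzing..." status immediately and
        # the second delivers the finished report.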
        try:
            status = "πŸ”„ Analyzing..."
            yield status, "", {} # Clear outputs
            
            result = comprehensive_english_analysis(text, top_n)
            
            # Generate HTML
            html = _format_comprehensive_html(result)
            
            status = f"βœ… Analysis complete! Found {len(result.get('lemma_deep_dive', {}))} lemmas."
            yield status, html, result
        except Exception as e:
            error_status = f"❌ Error: {str(e)}"
            yield error_status, f"<div style='color:red'>{str(e)}</div>", {"error": str(e), "traceback": traceback.format_exc()}
    
    analyze_button.click(
        fn=run_analysis_with_status_visual,
        inputs=[text_input, top_n_number],
        outputs=[status_output, html_output, json_output],
        api_name="comprehensive_analysis"
    )
    
    gr.Examples(
        [["The cat sleeps on the table.", 3]],
        inputs=[text_input, top_n_number],
        outputs=[status_output, html_output, json_output], 
        fn=run_analysis_with_status_visual,
        cache_examples=False
    )

def create_word_encyclopedia_tab():
    """--- UI for the NON-CONTEXTUAL Word Analyzer tab ---"""
    gr.Markdown("# πŸ“– Word Encyclopedia (Non-Contextual - EN)")
    gr.Markdown("Analyzes a **single English word** for all possible forms, using a chain of engines.")
    
    with gr.Column():
        word_input = gr.Textbox(
            label="Single English Word",
            placeholder="e.g., run, water, fast, beautiful"
        )
        
        with gr.Row():
            top_n_number = gr.Number(
                label="Limit Semantic Senses per POS (0 for all)",
                value=0, step=1, minimum=0, interactive=True
            )
            
            engine_radio = gr.Radio(
                label="Select Analysis Engine",
                choices=[
                    ("Wiktionary (Default)", "wiktionary"),
                    ("HanTa (EN)", "hanta"), 
                    ("Stanza", "stanza"),
                    ("NLTK", "nltk"),
                    ("TextBlob", "textblob"),
                ],
                value="wiktionary",
                interactive=True
            )

        analyze_button = gr.Button("Analyze Word", variant="primary")
    
    # --- CHANGED: Added HTML output component ---
    html_output = gr.HTML(label="Visual Report")
    json_output = gr.JSON(label="Raw JSON Data")
    
    # --- CHANGED: Wrapper function to return both HTML and JSON ---
    def run_word_visual(word, top_n, engine):
        data = analyze_word_encyclopedia(word, top_n, engine, 'en')
        html = _format_word_analysis_html(data)
        return html, data

    analyze_button.click(
        fn=run_word_visual, # Use wrapper
        inputs=[word_input, top_n_number, engine_radio], 
        outputs=[html_output, json_output], # Output to both
        api_name="analyze_word"
    )
    
    gr.Examples(
        [["run", 3, "wiktionary"], ["water", 0, "wiktionary"]],
        inputs=[word_input, top_n_number, engine_radio],
        outputs=[html_output, json_output], 
        fn=run_word_visual,
        cache_examples=False
    )

# --- Standalone Engine Tabs ---
def create_wiktionary_tab():
    gr.Markdown("# πŸ“™ Wiktionary Lookup (Raw Engine - EN)")
    gr.Markdown("Directly query the English Wiktionary (Primary) engine.")
    word_input = gr.Textbox(label="Single English Word", placeholder="e.g., house, go, today")
    analyze_button = gr.Button("Lookup Word in Wiktionary", variant="primary")
    output = gr.JSON(label="Wiktionary Engine Analysis (JSON)")
    analyze_button.click(
        fn=lambda word: _analyze_word_with_wiktionary(word, 0),
        inputs=[word_input], outputs=[output], api_name="wiktionary_lookup"
    )
    gr.Examples([["house"], ["go"], ["today"], ["run"]], inputs=[word_input], outputs=[output], 
                fn=lambda word: _analyze_word_with_wiktionary(word, 0), cache_examples=False)

def create_hanta_tab():
    gr.Markdown("# πŸ€– HanTa Lookup (Raw Engine - EN)")
    gr.Markdown("Directly query the HanTa (EN) (Fallback 1) engine.")
    word_input = gr.Textbox(label="Single English Word", placeholder="e.g., running, houses, unhappiest")
    analyze_button = gr.Button("Lookup Word with HanTa", variant="primary")
    output = gr.JSON(label="HanTa Engine Analysis (JSON)")
    analyze_button.click(
        fn=lambda word: _analyze_word_with_hanta_en(word, 0),
        inputs=[word_input], outputs=[output], api_name="hanta_lookup"
    )
    gr.Examples([["running"], ["houses"], ["unhappiest"], ["fast"]], inputs=[word_input], outputs=[output], 
                fn=lambda word: _analyze_word_with_hanta_en(word, 0), cache_examples=False)

def create_stanza_tab():
    gr.Markdown("# πŸ›οΈ Stanza Lookup (Raw Engine - EN)")
    gr.Markdown("Directly query the Stanza (Fallback 2) engine.")
    word_input = gr.Textbox(label="Single English Word", placeholder="e.g., ran, better, was")
    analyze_button = gr.Button("Lookup Word with Stanza", variant="primary")
    output = gr.JSON(label="Stanza Engine Analysis (JSON)")
    analyze_button.click(
        fn=lambda word: _analyze_word_with_stanza(word, 0),
        inputs=[word_input], outputs=[output], api_name="stanza_lookup"
    )
    gr.Examples([["ran"], ["better"], ["was"], ["dogs"]], inputs=[word_input], outputs=[output], 
                fn=lambda word: _analyze_word_with_stanza(word, 0), cache_examples=False)

def create_nltk_tab():
    gr.Markdown("# πŸ“š NLTK Lookup (Raw Engine - EN)")
    gr.Markdown("Directly query the NLTK (Fallback 3) engine.")
    word_input = gr.Textbox(label="Single English Word", placeholder="e.g., corpora, went")
    analyze_button = gr.Button("Lookup Word with NLTK", variant="primary")
    output = gr.JSON(label="NLTK Engine Analysis (JSON)")
    analyze_button.click(
        fn=lambda word: _analyze_word_with_nltk(word, 0),
        inputs=[word_input], outputs=[output], api_name="nltk_lookup"
    )
    gr.Examples([["corpora"], ["went"], ["best"], ["running"]], inputs=[word_input], outputs=[output], 
                fn=lambda word: _analyze_word_with_nltk(word, 0), cache_examples=False)

def create_textblob_tab():
    gr.Markdown("# πŸ’¬ TextBlob Lookup (Raw Engine - EN)")
    gr.Markdown("Directly query the TextBlob (Fallback 4) engine.")
    word_input = gr.Textbox(label="Single English Word", placeholder="e.g., worse, cacti")
    analyze_button = gr.Button("Lookup Word with TextBlob", variant="primary")
    output = gr.JSON(label="TextBlob Engine Analysis (JSON)")
    analyze_button.click(
        fn=lambda word: _analyze_word_with_textblob(word, 0),
        inputs=[word_input], outputs=[output], api_name="textblob_lookup"
    )
    gr.Examples([["worse"], ["cacti"], ["spoke"], ["fastest"]], inputs=[word_input], outputs=[output], 
                fn=lambda word: _analyze_word_with_textblob(word, 0), cache_examples=False)


# --- Main UI Builder ---
def create_consolidated_interface():
    """Builds the final Gradio app with all tabs."""
    with gr.Blocks(title="Consolidated Linguistics Hub (EN)", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# πŸ›οΈ Consolidated Linguistics Hub (ENGLISH)")
        gr.Markdown("A suite of advanced tools for English linguistics, built on OEWN, Stanza, NLTK, TextBlob, and more.")
        
        with gr.Tabs():
            # --- Main Tools ---
            with gr.Tab("πŸ“– Word Encyclopedia (EN)"):
                create_word_encyclopedia_tab()
                
            with gr.Tab("πŸš€ Comprehensive Analyzer (EN)"):
                create_combined_tab()

            with gr.Tab("πŸ”¬ spaCy Analyzer (Multi-lingual)"):
                create_spacy_tab()
            
            with gr.Tab("βœ… Grammar Check (EN)"):
                create_languagetool_tab()
            
            # --- Standalone Engine Tabs (NEW & EXPANDED) ---
            with gr.Tab("πŸ“™ Engine: Wiktionary (EN)"):
                create_wiktionary_tab()

            with gr.Tab("πŸ€– Engine: HanTa (EN)"):
                create_hanta_tab()

            with gr.Tab("πŸ›οΈ Engine: Stanza (EN)"):
                create_stanza_tab()
                
            with gr.Tab("πŸ“š Engine: NLTK (EN)"):
                create_nltk_tab()

            with gr.Tab("πŸ’¬ Engine: TextBlob (EN)"):
                create_textblob_tab()
                
            # --- Standalone Component Tabs ---
            with gr.Tab("πŸ“š Component: Inflections (EN)"):
                create_pattern_tab()

            with gr.Tab("πŸ“– Component: Thesaurus (OEWN)"):
                create_wordnet_tab()
            
            with gr.Tab("🌐 Component: ConceptNet (Direct)"):
                create_conceptnet_tab()
                
            with gr.Tab("πŸ”— Component: OpenBLP (EN)"):
                create_openblp_tab()
    
    return demo

# ============================================================================
# 9. MAIN EXECUTION BLOCK
# ============================================================================

if __name__ == "__main__":
    print("\n" + "="*70)
    print("CONSOLIDATED LINGUISTICS HUB (ENGLISH) (STARTING)")
    print("="*70 + "\n")

    # --- 1. Initialize spaCy Models ---
    print("--- Initializing spaCy Models ---")
    spacy_initialize_models()
    print("--- spaCy Done ---\n")
    
    # --- 2. Initialize WordNet Worker (OEWN) ---
    print("--- Initializing OEWN Worker ---")
    if WN_AVAILABLE:
        try:
            wordnet_start_worker()
            print("βœ“ OEWN worker is starting/ready.")
        except Exception as e:
            print(f"βœ— FAILED to start OEWN worker: {e}")
    else:
        print("INFO: OEWN ('wn') library not available, skipping worker.")
    print("--- OEWN Done ---\n")

    # --- 3. Initialize Wiktionary (English) ---
    print("--- Initializing English Wiktionary DB ---")
    try:
        if not wiktionary_download_db():
             print("βœ— WARNING: Failed to download English Wiktionary DB. Primary engine is disabled.")
        else:
             _ = wiktionary_get_connection() # Pre-warm
             wiktionary_run_startup_diagnostics()
    except Exception as e:
        print(f"βœ— FAILED to initialize Wiktionary: {e}")
    print("--- Wiktionary Done ---\n")

    # --- 4. Initialize HanTa Tagger (EN) ---
    print("--- Initializing HanTa Tagger (EN) ---")
    if HANTA_AVAILABLE:
        try:
            hanta_get_tagger_en()
        except Exception as e:
            print(f"βœ— FAILED to start HanTa (EN) tagger: {e}")
    else:
        print("INFO: HanTa library not available, skipping tagger.")
    print("--- HanTa Done ---\n")

    # --- 5. Initialize Stanza Pipeline (EN) ---
    print("--- Initializing Stanza Pipeline (EN) ---")
    if STANZA_AVAILABLE:
        try:
            stanza_get_pipeline_en()
        except Exception as e:
            print(f"βœ— FAILED to start Stanza (EN) pipeline: {e}")
    else:
        print("INFO: Stanza library not available, skipping pipeline.")
    print("--- Stanza Done ---\n")

    # --- 6. Initialize NLTK Lemmatizer ---
    print("--- Initializing NLTK Lemmatizer ---")
    if NLTK_AVAILABLE:
        try:
            nltk_get_lemmatizer()
        except Exception as e:
            print(f"βœ— FAILED to start NLTK: {e}")
    else:
        print("INFO: NLTK library not available, skipping lemmatizer.")
    print("--- NLTK Done ---\n")
    
    # --- 7. Check Pattern.en ---
    print("--- Checking Pattern.en ---")
    if not PATTERN_EN_AVAILABLE:
        print("WARNING: pattern.en library not available. 'Inflections' tab will fail.")
    else:
        print("βœ“ Pattern.en library is available.")
    print("--- Pattern.en Done ---\n")
    
    # --- 8. Initialize ConceptNet Client ---
    print("--- Initializing ConceptNet Client ---")
    if GRADIO_CLIENT_AVAILABLE:
        try:
            get_conceptnet_client()
        except Exception as e:
            print(f"βœ— FAILED to start ConceptNet Client: {e}")
    else:
        print("INFO: gradio_client not available, skipping ConceptNet client.")
    print("--- ConceptNet Client Done ---\n")

    print("="*70)
    print("All services initialized. Launching Gradio Hub (EN)...")
    print("="*70 + "\n")
    
    # --- 9. Launch Gradio ---
    demo = create_consolidated_interface()
    
    # Pin a different port (e.g., 7861) to avoid conflicts when the German app
    # runs on the same host:
    # demo.launch(server_name="0.0.0.0", server_port=7861, show_error=True)
    
    # No server_port argument: Gradio (or the hosting platform) picks the port.
    demo.launch(server_name="0.0.0.0", show_error=True)