temsa committed on
Commit c487a4b · verified · 1 Parent(s): d65fc5f

Add rc6 release with decoder repair improvements
.gitattributes CHANGED
@@ -1,35 +1,5 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
  *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
  *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
  *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,3 @@
+ __pycache__/
+ .venv/
+ *.pyc
LICENSE ADDED
@@ -0,0 +1,73 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
+
+ (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
NOTICE ADDED
@@ -0,0 +1,8 @@
+ This release is derived from OpenMed/OpenMed-PII-mLiteClinical-Base-135M-v1 (Apache-2.0).
+
+ Additional training data attribution:
+ - joelniklaus/mapa (CC-BY-4.0)
+ - gretelai/synthetic_pii_finance_multilingual (Apache-2.0)
+ - Synthetic Irish datasets created in this workspace and released under Apache-2.0
+
+ This repo distributes model artifacts and synthetic benchmark files. It does not redistribute third-party dataset rows.
README.md ADDED
@@ -0,0 +1,149 @@
+ ---
+ language:
+ - en
+ - ga
+ license: apache-2.0
+ library_name: transformers
+ pipeline_tag: token-classification
+ tags:
+ - pii
+ - de-identification
+ - token-classification
+ - ireland
+ - irish
+ - gaelic
+ - ppsn
+ - eircode
+ - passport
+ - phone-number
+ - iban
+ - onnx
+ - int8
+ base_model:
+ - OpenMed/OpenMed-PII-mLiteClinical-Base-135M-v1
+ datasets:
+ - temsa/OpenMed-Irish-PPSN-Eircode-Spec-v1
+ - temsa/OpenMed-Irish-CorePII-TrainMix-v1
+ - joelniklaus/mapa
+ - gretelai/synthetic_pii_finance_multilingual
+ ---
+
+ # OpenMed-mLiteClinical-IrishCorePII-135M-v2-rc6
+
+ Token-classification release for Irish core PII in English and Irish Gaelic.
+
+ ## Included Variants
+
+ - Full `transformers` checkpoint in the repo root
+ - Unquantized ONNX export in `onnx/model.onnx`
+ - Dynamic q8 ONNX artifact in `onnx/model_quantized.onnx`
+ - `inference_mask.py` for the full checkpoint
+ - `inference_mask_onnx.py` for the ONNX q8 artifact
+ - Benchmark files in `eval/`
+
+ ## Coverage
+
+ - `PPSN`
+ - `ACCOUNT_NUMBER`
+ - `BANK_ROUTING_NUMBER`
+ - `CREDIT_DEBIT_CARD`
+ - `PASSPORT_NUMBER`
+ - `POSTCODE`
+ - `PHONE_NUMBER`
+ - `EMAIL`
+ - `FIRST_NAME`
+ - `LAST_NAME`
+ - `SWIFT_BIC`
+
+ ## What Changed From rc5
+
+ `rc6` keeps the same checkpoint weights and the same bundled ONNX q8 artifact as `temsa/OpenMed-mLiteClinical-IrishCorePII-135M-v2-rc5`.
+
+ The improvement is in the shipped inference stack:
+
+ - regex-guided span repair for numeric and boundary-heavy labels
+ - stronger Irish phone validation, including `+353 (01) ...` forms
+ - Irish IBAN plausibility repair that catches real Irish bank-code placeholders while rejecting obvious fake values like `IE00TEST...`
+ - PPSN structure filtering that blocks invalid shapes like `12345678T` (see the sketch below)
+ - exact boundary trimming for grouped card and passport formats
+
+ This is the right layer to change: the remaining misses were mostly decoding and boundary failures, not weight-quality failures.
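+
+ The PPSN filter itself lives in `ppsn.py`, which is not part of this diff. As a rough, hypothetical sketch of the shape rule described above (a PPSN is seven digits followed by one or two letters, so the eight-digit `12345678T` fails):
+
+ ```python
+ import re
+
+ # Hypothetical illustration only; the shipped filter in ppsn.py
+ # may apply additional checks beyond this shape rule.
+ PPSN_SHAPE_RE = re.compile(r"^\d{7}[A-Z]{1,2}$")
+
+ def looks_like_ppsn(value: str) -> bool:
+     return PPSN_SHAPE_RE.match(value.strip().upper()) is not None
+
+ assert looks_like_ppsn("1234567T")
+ assert not looks_like_ppsn("12345678T")  # eight digits: wrong shape
+ ```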
+
+ ## Recommended Inference
+
+ Full checkpoint:
+
+ ```bash
+ uv run python inference_mask.py \
+   --model temsa/OpenMed-mLiteClinical-IrishCorePII-135M-v2-rc6 \
+   --ppsn-min-score 0.55 \
+   --other-min-score 0.50 \
+   --text "Please provide your passport: NN5123456 and call me on 0851234567." \
+   --json
+ ```
+
+ Dynamic q8 ONNX:
+
+ ```bash
+ uv run python inference_mask_onnx.py \
+   --model temsa/OpenMed-mLiteClinical-IrishCorePII-135M-v2-rc6 \
+   --onnx-file onnx/model_quantized.onnx \
+   --ppsn-min-score 0.55 \
+   --other-min-score 0.50 \
+   --text "My IBAN is IE29 AIBK 9311 5212 345678 and my PPSN is 1234567T." \
+   --json
+ ```
+
+ ## Key Benchmarks
+
+ ### rc5 vs rc6
+
+ | Suite | rc5 full | rc6 full | rc5 ONNX q8 | rc6 ONNX q8 |
+ |---|---:|---:|---:|---:|
+ | Irish core manual | 0.9737 | 1.0000 | 0.9669 | 0.9934 |
+ | Irish PPSN / phone edge | 0.9744 | 0.9744 | 0.9744 | 1.0000 |
+ | Phone / passport / finance | 0.9600 | 1.0000 | 0.9362 | 1.0000 |
+ | Finance boundary repair | 0.9143 | 1.0000 | 0.8750 | 1.0000 |
+ | QA Gaelic weak-context PPSN | 1.0000 | 1.0000 | 1.0000 | 1.0000 |
+ | HFW contextual smoke | 0.7490 | 0.7712 | n/a | 0.7804 |
+
+ ### Core Label Breakdown
+
+ | Label | rc6 full | rc6 ONNX q8 |
+ |---|---:|---:|
+ | PPSN | 1.0000 | 1.0000 |
+ | PHONE_NUMBER | 1.0000 | 1.0000 |
+ | POSTCODE | 1.0000 | 0.8571 |
+ | PASSPORT_NUMBER | 1.0000 | 1.0000 |
+ | ACCOUNT_NUMBER | 1.0000 | 1.0000 |
+ | BANK_ROUTING_NUMBER | 1.0000 | 1.0000 |
+ | EMAIL | 1.0000 | 1.0000 |
+ | FIRST_NAME | 1.0000 | 1.0000 |
+ | LAST_NAME | 1.0000 | 1.0000 |
+
+ ## Dynamic q8 Artifact
+
+ Artifact paths:
+
+ - unquantized: `onnx/model.onnx`
+ - quantized: `onnx/model_quantized.onnx`
+
+ Quantization recipe used in this repo:
+
+ - ONNX pre-processing before quantization
+ - ONNX Runtime dynamic int8
+ - `qint8`
+ - `per_channel=true`
+ - `op_types=MatMul,Gemm,Attention`
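+
+ A minimal sketch of that recipe with ONNX Runtime's dynamic quantization API (the exact pre-processing invocation used for this repo is not shown here, so treat this as an approximation):
+
+ ```python
+ from onnxruntime.quantization import QuantType, quantize_dynamic
+
+ # Dynamic int8 quantization roughly matching the recipe above:
+ # qint8 weights, per-channel, limited to the listed op types.
+ quantize_dynamic(
+     model_input="onnx/model.onnx",
+     model_output="onnx/model_quantized.onnx",
+     per_channel=True,
+     weight_type=QuantType.QInt8,
+     op_types_to_quantize=["MatMul", "Gemm", "Attention"],
+ )
+ ```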
+
+ ## Limits
+
+ - `POSTCODE` remains the main q8 gap on the manual Irish core suite.
+ - The HFW contextual smoke suite still contains first/last-name false positives inherited from the underlying checkpoint.
+ - This release layers a stronger inference stack over `rc5`; it is not a new fine-tuned checkpoint.
+
+ ## License And Attribution
+
+ - Release license: Apache-2.0
+ - Base model: `OpenMed/OpenMed-PII-mLiteClinical-Base-135M-v1`
+ - See `NOTICE` and `training_sources.json` for attribution and release details.
config.json ADDED
@@ -0,0 +1,250 @@
+ {
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForTokenClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "dtype": "float32",
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "O",
+     "1": "B-account_number",
+     "2": "B-age",
+     "3": "B-api_key",
+     "4": "B-bank_routing_number",
+     "5": "B-biometric_identifier",
+     "6": "B-blood_type",
+     "7": "B-certificate_license_number",
+     "8": "B-city",
+     "9": "B-company_name",
+     "10": "B-coordinate",
+     "11": "B-country",
+     "12": "B-county",
+     "13": "B-credit_debit_card",
+     "14": "B-customer_id",
+     "15": "B-cvv",
+     "16": "B-date",
+     "17": "B-date_of_birth",
+     "18": "B-date_time",
+     "19": "B-device_identifier",
+     "20": "B-education_level",
+     "21": "B-email",
+     "22": "B-employee_id",
+     "23": "B-employment_status",
+     "24": "B-fax_number",
+     "25": "B-first_name",
+     "26": "B-gender",
+     "27": "B-health_plan_beneficiary_number",
+     "28": "B-http_cookie",
+     "29": "B-ipv4",
+     "30": "B-ipv6",
+     "31": "B-language",
+     "32": "B-last_name",
+     "33": "B-license_plate",
+     "34": "B-mac_address",
+     "35": "B-medical_record_number",
+     "36": "B-occupation",
+     "37": "B-password",
+     "38": "B-phone_number",
+     "39": "B-pin",
+     "40": "B-political_view",
+     "41": "B-postcode",
+     "42": "B-race_ethnicity",
+     "43": "B-religious_belief",
+     "44": "B-sexuality",
+     "45": "B-ssn",
+     "46": "B-state",
+     "47": "B-street_address",
+     "48": "B-swift_bic",
+     "49": "B-tax_id",
+     "50": "B-time",
+     "51": "B-unique_id",
+     "52": "B-url",
+     "53": "B-user_name",
+     "54": "B-vehicle_identifier",
+     "55": "I-account_number",
+     "56": "I-api_key",
+     "57": "I-biometric_identifier",
+     "58": "I-blood_type",
+     "59": "I-certificate_license_number",
+     "60": "I-city",
+     "61": "I-company_name",
+     "62": "I-coordinate",
+     "63": "I-country",
+     "64": "I-county",
+     "65": "I-credit_debit_card",
+     "66": "I-customer_id",
+     "67": "I-date",
+     "68": "I-date_of_birth",
+     "69": "I-date_time",
+     "70": "I-device_identifier",
+     "71": "I-education_level",
+     "72": "I-email",
+     "73": "I-employee_id",
+     "74": "I-employment_status",
+     "75": "I-fax_number",
+     "76": "I-first_name",
+     "77": "I-gender",
+     "78": "I-health_plan_beneficiary_number",
+     "79": "I-http_cookie",
+     "80": "I-ipv4",
+     "81": "I-ipv6",
+     "82": "I-language",
+     "83": "I-last_name",
+     "84": "I-license_plate",
+     "85": "I-mac_address",
+     "86": "I-medical_record_number",
+     "87": "I-occupation",
+     "88": "I-password",
+     "89": "I-phone_number",
+     "90": "I-pin",
+     "91": "I-political_view",
+     "92": "I-postcode",
+     "93": "I-race_ethnicity",
+     "94": "I-religious_belief",
+     "95": "I-sexuality",
+     "96": "I-ssn",
+     "97": "I-state",
+     "98": "I-street_address",
+     "99": "I-swift_bic",
+     "100": "I-tax_id",
+     "101": "I-time",
+     "102": "I-unique_id",
+     "103": "I-url",
+     "104": "I-user_name",
+     "105": "I-vehicle_identifier",
+     "106": "B-PPSN",
+     "107": "I-PPSN",
+     "108": "B-PASSPORT_NUMBER",
+     "109": "I-PASSPORT_NUMBER",
+     "110": "I-bank_routing_number"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "B-PASSPORT_NUMBER": 108,
+     "B-PPSN": 106,
+     "B-account_number": 1,
+     "B-age": 2,
+     "B-api_key": 3,
+     "B-bank_routing_number": 4,
+     "B-biometric_identifier": 5,
+     "B-blood_type": 6,
+     "B-certificate_license_number": 7,
+     "B-city": 8,
+     "B-company_name": 9,
+     "B-coordinate": 10,
+     "B-country": 11,
+     "B-county": 12,
+     "B-credit_debit_card": 13,
+     "B-customer_id": 14,
+     "B-cvv": 15,
+     "B-date": 16,
+     "B-date_of_birth": 17,
+     "B-date_time": 18,
+     "B-device_identifier": 19,
+     "B-education_level": 20,
+     "B-email": 21,
+     "B-employee_id": 22,
+     "B-employment_status": 23,
+     "B-fax_number": 24,
+     "B-first_name": 25,
+     "B-gender": 26,
+     "B-health_plan_beneficiary_number": 27,
+     "B-http_cookie": 28,
+     "B-ipv4": 29,
+     "B-ipv6": 30,
+     "B-language": 31,
+     "B-last_name": 32,
+     "B-license_plate": 33,
+     "B-mac_address": 34,
+     "B-medical_record_number": 35,
+     "B-occupation": 36,
+     "B-password": 37,
+     "B-phone_number": 38,
+     "B-pin": 39,
+     "B-political_view": 40,
+     "B-postcode": 41,
+     "B-race_ethnicity": 42,
+     "B-religious_belief": 43,
+     "B-sexuality": 44,
+     "B-ssn": 45,
+     "B-state": 46,
+     "B-street_address": 47,
+     "B-swift_bic": 48,
+     "B-tax_id": 49,
+     "B-time": 50,
+     "B-unique_id": 51,
+     "B-url": 52,
+     "B-user_name": 53,
+     "B-vehicle_identifier": 54,
+     "I-PASSPORT_NUMBER": 109,
+     "I-PPSN": 107,
+     "I-account_number": 55,
+     "I-api_key": 56,
+     "I-bank_routing_number": 110,
+     "I-biometric_identifier": 57,
+     "I-blood_type": 58,
+     "I-certificate_license_number": 59,
+     "I-city": 60,
+     "I-company_name": 61,
+     "I-coordinate": 62,
+     "I-country": 63,
+     "I-county": 64,
+     "I-credit_debit_card": 65,
+     "I-customer_id": 66,
+     "I-date": 67,
+     "I-date_of_birth": 68,
+     "I-date_time": 69,
+     "I-device_identifier": 70,
+     "I-education_level": 71,
+     "I-email": 72,
+     "I-employee_id": 73,
+     "I-employment_status": 74,
+     "I-fax_number": 75,
+     "I-first_name": 76,
+     "I-gender": 77,
+     "I-health_plan_beneficiary_number": 78,
+     "I-http_cookie": 79,
+     "I-ipv4": 80,
+     "I-ipv6": 81,
+     "I-language": 82,
+     "I-last_name": 83,
+     "I-license_plate": 84,
+     "I-mac_address": 85,
+     "I-medical_record_number": 86,
+     "I-occupation": 87,
+     "I-password": 88,
+     "I-phone_number": 89,
+     "I-pin": 90,
+     "I-political_view": 91,
+     "I-postcode": 92,
+     "I-race_ethnicity": 93,
+     "I-religious_belief": 94,
+     "I-sexuality": 95,
+     "I-ssn": 96,
+     "I-state": 97,
+     "I-street_address": 98,
+     "I-swift_bic": 99,
+     "I-tax_id": 100,
+     "I-time": 101,
+     "I-unique_id": 102,
+     "I-url": 103,
+     "I-user_name": 104,
+     "I-vehicle_identifier": 105,
+     "O": 0
+   },
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "output_past": true,
+   "pad_token_id": 0,
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "transformers_version": "4.57.6",
+   "vocab_size": 119547
+ }
eircode.py ADDED
@@ -0,0 +1,90 @@
+ #!/usr/bin/env python3
+ import random
+ import re
+ from typing import Any, Dict, Iterator
+
+ # Based on the public Eircode format documentation and the public routing-area list.
+ # Routing keys were materialized from the Wikipedia routing-area table so generation
+ # can stay offline and reproducible in this workspace.
+
+ EIRCODE_ROUTING_KEYS = ['A92', 'Y14', 'A84', 'H65', 'N37', 'R14', 'K32', 'F26', 'H53', 'P31', 'F31', 'A75', 'A41', 'F35', 'F56', 'P72', 'P75', 'H14', 'R42', 'A94', 'F52', 'A98', 'V23', 'E21', 'R93', 'A81', 'N41', 'E32', 'P43', 'E25', 'F23', 'F45', 'H12', 'P56', 'F12', 'H71', 'P85', 'H23', 'E91', 'P24', 'H16', 'T12', 'T23', 'P14', 'P32', 'P47', 'T56', 'T34', 'R56', 'A63', 'F94', 'D01', 'D02', 'D03', 'D04', 'D05', 'D06', 'D6W', 'D07', 'D08', 'D09', 'D10', 'D11', 'D12', 'D13', 'D14', 'D15', 'D16', 'D17', 'D18', 'D20', 'D22', 'D24', 'A86', 'A91', 'X35', 'A85', 'R45', 'A83', 'V95', 'Y21', 'P61', 'H91', 'A42', 'A96', 'Y25', 'A82', 'R51', 'R95', 'V93', 'X42', 'V35', 'V15', 'P17', 'F92', 'F93', 'V94', 'V31', 'T45', 'N39', 'H62', 'K78', 'K45', 'P12', 'K36', 'P51', 'W23', 'P25', 'P67', 'H18', 'W34', 'R21', 'N91', 'W91', 'C15', 'E45', 'Y34', 'W12', 'V42', 'A45', 'R32', 'A67', 'F42', 'E53', 'K56', 'V14', 'K34', 'P81', 'F91', 'K67', 'E41', 'E34', 'V92', 'H54', 'R35', 'X91', 'F28', 'Y35', 'P36']
+ EIRCODE_ROUTING_KEY_SET = set(EIRCODE_ROUTING_KEYS)
+ UNIQUE_IDENTIFIER_CHARS = "0123456789ACDEFHKNPRTVWXY"
+ UNIQUE_IDENTIFIER_SET = set(UNIQUE_IDENTIFIER_CHARS)
+ SEPARATORS_RE = re.compile(r"[\s\u00A0]+")
+ STRICT_RE = re.compile(r"^(?:[ACDEFHKNPRTVWXY]\d{2}|D6W) [0-9ACDEFHKNPRTVWXY]{4}$", re.IGNORECASE)
+ CANDIDATE_RE = re.compile(r"\b(?:[ACDEFHKNPRTVWXY]\d{2}|D6W)(?:[\s\u00A0]*[0-9ACDEFHKNPRTVWXY]{4})\b", re.IGNORECASE)
+
+
+ def normalize(value: str) -> str:
+     return SEPARATORS_RE.sub("", value.strip().upper())
+
+
+ def format_eircode(value: str) -> str:
+     compact = normalize(value)
+     if len(compact) != 7:
+         raise ValueError("Eircode must normalize to 7 characters")
+     return f"{compact[:3]} {compact[3:]}"
+
+
+ def is_valid_routing_key(value: str) -> bool:
+     return normalize(value)[:3] in EIRCODE_ROUTING_KEY_SET
+
+
+ def is_valid_unique_identifier(value: str) -> bool:
+     compact = normalize(value)
+     if len(compact) < 7:
+         return False
+     return all(ch in UNIQUE_IDENTIFIER_SET for ch in compact[3:7])
+
+
+ def is_valid_eircode(value: str, strict_spacing: bool = False) -> bool:
+     compact = normalize(value)
+     if len(compact) != 7:
+         return False
+     if compact[:3] not in EIRCODE_ROUTING_KEY_SET:
+         return False
+     if not all(ch in UNIQUE_IDENTIFIER_SET for ch in compact[3:]):
+         return False
+     if strict_spacing:
+         return STRICT_RE.match(value.strip().upper()) is not None
+     return True
+
+
+ def generate_unique_identifier() -> str:
+     return ''.join(random.choice(UNIQUE_IDENTIFIER_CHARS) for _ in range(4))
+
+
+ def generate_eircode(compact: bool = False) -> str:
+     value = random.choice(EIRCODE_ROUTING_KEYS) + generate_unique_identifier()
+     return value if compact else format_eircode(value)
+
+
+ def corrupt_eircode(value: str | None = None) -> str:
+     compact = normalize(value or generate_eircode(compact=True))
+     if len(compact) != 7:
+         compact = normalize(generate_eircode(compact=True))
+     mode = random.choice(['routing', 'suffix', 'length'])
+     if mode == 'routing':
+         bad_prefixes = ['B12', 'Z99', 'Q1A', 'O00']
+         return format_eircode(random.choice(bad_prefixes) + compact[3:7])
+     if mode == 'suffix':
+         bad_chars = 'BGIJLMOQSUZ'
+         pos = random.randint(3, 6)
+         chars = list(compact)
+         chars[pos] = random.choice(bad_chars)
+         return format_eircode(''.join(chars))
+     if random.random() < 0.5:
+         return compact[:6]
+     return compact + random.choice('BGIJLMOQSUZ')
+
+
+ def iter_eircode_candidates(text: str) -> Iterator[Dict[str, Any]]:
+     for match in CANDIDATE_RE.finditer(text):
+         raw = match.group(0)
+         yield {
+             'start': match.start(),
+             'end': match.end(),
+             'text': raw,
+             'normalized': normalize(raw),
+         }
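
A quick usage sketch for the module above, using only the names defined in it:

```python
from eircode import format_eircode, generate_eircode, is_valid_eircode

print(generate_eircode())            # e.g. "A92 X4T7": random but valid
print(is_valid_eircode("D6W 1234"))  # True: D6W is a listed routing key
print(is_valid_eircode("B12 3456"))  # False: B12 is not a routing key
print(format_eircode("a92x4t7"))     # "A92 X4T7" after normalization
```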
eval/benchmark_summary.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "release": "OpenMed-mLiteClinical-IrishCorePII-135M-v2-rc6",
+   "based_on_release": "temsa/OpenMed-mLiteClinical-IrishCorePII-135M-v2-rc5",
+   "weights_changed": false,
+   "artifacts_changed": false,
+   "inference_stack_changed": true,
+   "full": {
+     "other_min_score": 0.5,
+     "ppsn_min_score": 0.55,
+     "irish_core_manual_f1": 1.0,
+     "irish_edge_f1": 0.9743589743589743,
+     "gaelic_weak_ppsn_f1": 1.0,
+     "finance_suite_f1": 1.0,
+     "finance_boundary_f1": 1.0,
+     "hfw_context_smoke_f1": 0.7711757269279392
+   },
+   "onnx_q8": {
+     "other_min_score": 0.5,
+     "ppsn_min_score": 0.55,
+     "irish_core_manual_f1": 0.9933774834437086,
+     "irish_edge_f1": 1.0,
+     "gaelic_weak_ppsn_f1": 1.0,
+     "finance_suite_f1": 1.0,
+     "finance_boundary_f1": 1.0,
+     "hfw_context_smoke_f1": 0.7803617571059432
+   },
+   "comparisons": {
+     "rc5_full_irish_core_manual_f1": 0.9736842105263158,
+     "rc5_full_finance_boundary_f1": 0.9142857142857143,
+     "rc5_full_finance_suite_f1": 0.96,
+     "rc5_q8_irish_core_manual_f1": 0.9668874172185431,
+     "rc5_q8_finance_boundary_f1": 0.8750000000000001,
+     "rc5_q8_finance_suite_f1": 0.9361702127659575
+   }
+ }
eval/benchmark_summary.md ADDED
@@ -0,0 +1,33 @@
+ # Benchmark Summary
+
+ ## What Changed
+
+ `rc6` keeps the same checkpoint weights and ONNX q8 artifact as `temsa/OpenMed-mLiteClinical-IrishCorePII-135M-v2-rc5`, but ships a stronger label-aware decoder:
+
+ - regex-guided span repair for `PPSN`, `PASSPORT_NUMBER`, `BANK_ROUTING_NUMBER`, `ACCOUNT_NUMBER`, `CREDIT_DEBIT_CARD`, `PHONE_NUMBER`, `SWIFT_BIC`, and `POSTCODE`
+ - stronger Irish phone validation, including `+353 (01) ...` forms
+ - Irish IBAN plausibility repair that catches real Irish bank-code placeholders and rejects obvious fake values like `IE00TEST...`
+ - PPSN structure filtering to block invalid shapes such as `12345678T`
+ - exact-span trimming for grouped numeric formats
+
+ ## Full Checkpoint
+
+ | Suite | rc5 | rc6 |
+ |---|---:|---:|
+ | Irish core manual | 0.9737 | 1.0000 |
+ | Phone / passport / finance | 0.9600 | 1.0000 |
+ | Finance boundary repair | 0.9143 | 1.0000 |
+ | Gaelic weak-context PPSN | 1.0000 | 1.0000 |
+ | Irish PPSN / phone edge | 0.9744 | 0.9744 |
+ | HFW contextual smoke | 0.7490 | 0.7712 |
+
+ ## ONNX q8
+
+ | Suite | rc5 q8 | rc6 q8 |
+ |---|---:|---:|
+ | Irish core manual | 0.9669 | 0.9934 |
+ | Phone / passport / finance | 0.9362 | 1.0000 |
+ | Finance boundary repair | 0.8750 | 1.0000 |
+ | Gaelic weak-context PPSN | 1.0000 | 1.0000 |
+ | Irish PPSN / phone edge | 0.9744 | 1.0000 |
+ | HFW contextual smoke | n/a | 0.7804 |
inference_mask.py ADDED
@@ -0,0 +1,79 @@
+ #!/usr/bin/env python3
+ import argparse
+ import json
+ import os
+
+ os.environ.setdefault("TRANSFORMERS_NO_TF", "1")
+ os.environ.setdefault("TRANSFORMERS_NO_FLAX", "1")
+ os.environ.setdefault("TRANSFORMERS_NO_TORCHVISION", "1")
+ os.environ["USE_TF"] = "0"
+ os.environ["USE_FLAX"] = "0"
+ os.environ["USE_TORCH"] = "1"
+
+ import torch
+ from transformers import AutoModelForTokenClassification, pipeline
+
+ from irish_core_decoder import repair_irish_core_spans
+ from onnx_token_classifier import safe_auto_tokenizer
+
+
+ def mask_text(text: str, spans: list[dict]) -> str:
+     out = text
+     for span in sorted(spans, key=lambda item: (item["start"], item["end"]), reverse=True):
+         out = out[:span["start"]] + f"[{span['label']}]" + out[span["end"]:]
+     return out
+
+
+ def main() -> None:
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--model", default=".")
+     parser.add_argument("--text", required=True)
+     parser.add_argument("--device", choices=["auto", "cpu", "cuda"], default="auto")
+     parser.add_argument("--ppsn-min-score", type=float, default=0.55)
+     parser.add_argument("--other-min-score", type=float, default=0.50)
+     parser.add_argument("--json", action="store_true")
+     args = parser.parse_args()
+
+     tokenizer = safe_auto_tokenizer(args.model)
+     model = AutoModelForTokenClassification.from_pretrained(args.model)
+     if args.device == "auto":
+         device = "cuda" if torch.cuda.is_available() else "cpu"
+     else:
+         device = args.device
+     model.to(device)
+     model.eval()
+
+     nlp = pipeline(
+         "token-classification",
+         model=model,
+         tokenizer=tokenizer,
+         aggregation_strategy="simple",
+         device=0 if device == "cuda" else -1,
+     )
+     general = nlp(args.text)
+     spans = repair_irish_core_spans(
+         args.text,
+         model,
+         tokenizer,
+         general,
+         other_min_score=args.other_min_score,
+         ppsn_min_score=args.ppsn_min_score,
+     )
+     result = {
+         "model": args.model,
+         "masked_text": mask_text(args.text, spans),
+         "spans": spans,
+         "ppsn_decoder": "word_aligned",
+         "general_decoder": "irish_core_label_aware",
+         "ppsn_min_score": args.ppsn_min_score,
+         "other_min_score": args.other_min_score,
+         "backend": "transformers",
+     }
+     if args.json:
+         print(json.dumps(result, indent=2, ensure_ascii=False))
+     else:
+         print(result["masked_text"])
+
+
+ if __name__ == "__main__":
+     main()
inference_mask_onnx.py ADDED
@@ -0,0 +1,53 @@
+ #!/usr/bin/env python3
+ import argparse
+ import json
+
+ from irish_core_decoder import repair_irish_core_spans_onnx
+ from onnx_token_classifier import load_onnx_token_classifier
+
+
+ def mask_text(text: str, spans: list[dict]) -> str:
+     out = text
+     for span in sorted(spans, key=lambda item: (item["start"], item["end"]), reverse=True):
+         out = out[:span["start"]] + f"[{span['label']}]" + out[span["end"]:]
+     return out
+
+
+ def main() -> None:
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--model", default=".")
+     parser.add_argument("--onnx-file", default="onnx/model_quantized.onnx")
+     parser.add_argument("--text", required=True)
+     parser.add_argument("--ppsn-min-score", type=float, default=0.55)
+     parser.add_argument("--other-min-score", type=float, default=0.50)
+     parser.add_argument("--json", action="store_true")
+     args = parser.parse_args()
+
+     session, tokenizer, config, onnx_path = load_onnx_token_classifier(args.model, onnx_file=args.onnx_file)
+     spans = repair_irish_core_spans_onnx(
+         args.text,
+         session,
+         tokenizer,
+         config,
+         other_min_score=args.other_min_score,
+         ppsn_min_score=args.ppsn_min_score,
+     )
+     result = {
+         "model": args.model,
+         "onnx_file": str(onnx_path),
+         "masked_text": mask_text(args.text, spans),
+         "spans": spans,
+         "ppsn_decoder": "word_aligned",
+         "general_decoder": "irish_core_label_aware",
+         "ppsn_min_score": args.ppsn_min_score,
+         "other_min_score": args.other_min_score,
+         "backend": "onnx",
+     }
+     if args.json:
+         print(json.dumps(result, indent=2, ensure_ascii=False))
+     else:
+         print(result["masked_text"])
+
+
+ if __name__ == "__main__":
+     main()
irish_core_decoder.py ADDED
@@ -0,0 +1,553 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ import regex as re
3
+ import torch
4
+
5
+ from eircode import iter_eircode_candidates, is_valid_eircode
6
+ from ppsn import is_plausible_ppsn, iter_ppsn_candidates
7
+ from raw_word_aligned import word_aligned_ppsn_spans
8
+
9
+
10
+ TOKEN_RE = re.compile(r"[A-Za-z0-9]+|[^\w\s]", re.UNICODE)
11
+ PHONE_RE = re.compile(r"^(?:\+353(?:\s*\(0\))?[\s-]*\(?\d{1,3}\)?(?:[\s-]*\d){6,8}|0\(?\d{1,3}\)?(?:[\s-]*\d){6,8})$")
12
+ PHONE_CANDIDATE_RE = re.compile(r"(?<![A-Za-z0-9])(?:\+353(?:\s*\(0\))?[\s-]*\(?\d{1,3}\)?(?:[\s-]*\d){6,8}|0\(?\d{1,3}\)?(?:[\s-]*\d){6,8})(?![A-Za-z0-9])")
13
+ PASSPORT_RE = re.compile(r"^[A-Z]{2}\s?\d{7}$")
14
+ PASSPORT_CANDIDATE_RE = re.compile(r"(?<![A-Za-z0-9])[A-Z]{2}\s?\d{7}(?![A-Za-z0-9])", re.IGNORECASE)
15
+ SORT_RE = re.compile(r"^(?:\d{6}|\d{2}[ -]\d{2}[ -]\d{2})$")
16
+ BANK_ROUTING_CANDIDATE_RE = re.compile(r"(?<!\d)(?:\d{6}|\d{2}[ -]\d{2}[ -]\d{2})(?!\d)")
17
+ IBAN_IE_RE = re.compile(r"^IE\d{2}[A-Z]{4}\d{14}$")
18
+ IBAN_IE_CANDIDATE_RE = re.compile(r"(?<![A-Za-z0-9])IE\d{2}(?:[\s-]?[A-Z0-9]){18}(?![A-Za-z0-9])", re.IGNORECASE)
19
+ BIC_RE = re.compile(r"^[A-Z]{4}[A-Z]{2}[A-Z0-9]{2}(?:[A-Z0-9]{3})?$")
20
+ BIC_CANDIDATE_RE = re.compile(r"(?<![A-Za-z0-9])[A-Z]{4}[A-Z]{2}[A-Z0-9]{2}(?:[A-Z0-9]{3})?(?![A-Za-z0-9])", re.IGNORECASE)
21
+ EIRCODE_RE = re.compile(r"^(?:[ACDEFHKNPRTVWXY]\d{2}|D6W)\s?[0-9ACDEFHKNPRTVWXY]{4}$", re.IGNORECASE)
22
+ CARD_GROUPED_RE = re.compile(r"^(?:\d{4}(?:[ -]\d{4}){3,4}|\d{4}[ -]\d{6}[ -]\d{5})$")
23
+ CARD_CANDIDATE_RE = re.compile(r"(?<!\d)(?:\d[ -]?){13,19}(?!\d)")
24
+ KNOWN_IE_IBAN_BANK_CODES = {
25
+ "AIBK",
26
+ "BOFI",
27
+ "IPBS",
28
+ "IRCE",
29
+ "ULSB",
30
+ "PTSB",
31
+ "EBSI",
32
+ "DABA",
33
+ "CITI",
34
+ "TRWI",
35
+ "REVO",
36
+ }
37
+
38
+ DEFAULT_LABEL_THRESHOLDS = {
39
+ "PHONE_NUMBER": 0.35,
40
+ "PASSPORT_NUMBER": 0.11,
41
+ "BANK_ROUTING_NUMBER": 0.35,
42
+ "ACCOUNT_NUMBER": 0.40,
43
+ "CREDIT_DEBIT_CARD": 0.08,
44
+ "SWIFT_BIC": 0.50,
45
+ }
46
+
47
+ FORMAT_LABELS = set(DEFAULT_LABEL_THRESHOLDS)
48
+ OUTPUT_PRIORITY = {
49
+ "PPSN": 0,
50
+ "PASSPORT_NUMBER": 1,
51
+ "ACCOUNT_NUMBER": 2,
52
+ "BANK_ROUTING_NUMBER": 3,
53
+ "CREDIT_DEBIT_CARD": 4,
54
+ "PHONE_NUMBER": 5,
55
+ "SWIFT_BIC": 6,
56
+ "POSTCODE": 7,
57
+ "EMAIL": 8,
58
+ "FIRST_NAME": 9,
59
+ "LAST_NAME": 10,
60
+ }
61
+
62
+
63
+ def tokenize_with_spans(text: str):
64
+ return [(m.group(0), m.start(), m.end()) for m in TOKEN_RE.finditer(text)]
65
+
66
+
67
+ def normalize_label(label: str) -> str:
68
+ label = (label or "").strip()
69
+ if label.startswith("B-") or label.startswith("I-"):
70
+ label = label[2:]
71
+ return label.upper()
72
+
73
+
74
+ def luhn_ok(value: str) -> bool:
75
+ digits = "".join(ch for ch in value if ch.isdigit())
76
+ if not (13 <= len(digits) <= 19):
77
+ return False
78
+ total = 0
79
+ double = False
80
+ for ch in reversed(digits):
81
+ number = int(ch)
82
+ if double:
83
+ number *= 2
84
+ if number > 9:
85
+ number -= 9
86
+ total += number
87
+ double = not double
88
+ return total % 10 == 0
89
+
90
+
91
+ def iban_mod97_ok(value: str) -> bool:
92
+ compact = re.sub(r"[\s-]+", "", value.strip().upper())
93
+ if not IBAN_IE_RE.match(compact):
94
+ return False
95
+ rearranged = compact[4:] + compact[:4]
96
+ remainder = 0
97
+ for ch in rearranged:
98
+ if ch.isdigit():
99
+ digits = ch
100
+ else:
101
+ digits = str(ord(ch) - ord("A") + 10)
102
+ for digit in digits:
103
+ remainder = (remainder * 10 + int(digit)) % 97
104
+ return remainder == 1
105
+
106
+
107
+ def is_plausible_ie_iban(value: str) -> bool:
108
+ compact = re.sub(r"[\s-]+", "", value.strip().upper())
109
+ if not IBAN_IE_RE.match(compact):
110
+ return False
111
+ if iban_mod97_ok(compact):
112
+ return True
113
+ return compact[4:8] in KNOWN_IE_IBAN_BANK_CODES
114
+
115
+
116
+ def normalize_irish_phone(value: str) -> str:
117
+ compact = value.strip()
118
+ compact = compact.replace("(0)", "0")
119
+ compact = re.sub(r"[\s\-\(\)]", "", compact)
120
+ if compact.startswith("00353"):
121
+ compact = "+" + compact[2:]
122
+ return compact
123
+
124
+
125
+ def is_valid_irish_phone(value: str) -> bool:
126
+ compact = normalize_irish_phone(value)
127
+ if compact.startswith("+353"):
128
+ rest = compact[4:]
129
+ if rest.startswith("0"):
130
+ rest = rest[1:]
131
+ if not rest.isdigit():
132
+ return False
133
+ if rest.startswith("8"):
134
+ return len(rest) == 9
135
+ return len(rest) in {8, 9}
136
+ if not compact.startswith("0") or not compact.isdigit():
137
+ return False
138
+ if compact.startswith("08"):
139
+ return len(compact) == 10
140
+ return len(compact) in {9, 10}
141
+
142
+
143
+ def is_plausible_card(value: str) -> bool:
144
+ digits = "".join(ch for ch in value if ch.isdigit())
145
+ if not (13 <= len(digits) <= 19):
146
+ return False
147
+ if luhn_ok(value):
148
+ return True
149
+ return CARD_GROUPED_RE.match(value.strip()) is not None
150
+
151
+
152
+ def normalize_passport(value: str) -> str:
153
+ return re.sub(r"\s+", "", value.strip().upper())
154
+
155
+
156
+ def regex_candidates_for_label(text: str, label: str):
157
+ label = label.upper()
158
+ if label == "PPSN":
159
+ for candidate in iter_ppsn_candidates(text):
160
+ yield candidate
161
+ return
162
+ if label == "POSTCODE":
163
+ for candidate in iter_eircode_candidates(text):
164
+ yield candidate
165
+ return
166
+ pattern = {
167
+ "PHONE_NUMBER": PHONE_CANDIDATE_RE,
168
+ "PASSPORT_NUMBER": PASSPORT_CANDIDATE_RE,
169
+ "BANK_ROUTING_NUMBER": BANK_ROUTING_CANDIDATE_RE,
170
+ "ACCOUNT_NUMBER": IBAN_IE_CANDIDATE_RE,
171
+ "CREDIT_DEBIT_CARD": CARD_CANDIDATE_RE,
172
+ "SWIFT_BIC": BIC_CANDIDATE_RE,
173
+ }.get(label)
174
+ if pattern is None:
175
+ return
176
+ for match in pattern.finditer(text):
177
+ yield {
178
+ "start": match.start(),
179
+ "end": match.end(),
180
+ "text": match.group(0),
181
+ "normalized": match.group(0),
182
+ }
183
+
184
+
185
+ def plausible_label_text(label: str, value: str) -> bool:
186
+ value = value.strip()
187
+ if label == "PPSN":
188
+ return is_plausible_ppsn(value)
189
+ if label == "PHONE_NUMBER":
190
+ return PHONE_RE.match(value) is not None and is_valid_irish_phone(value)
191
+ if label == "PASSPORT_NUMBER":
192
+ return PASSPORT_RE.match(normalize_passport(value)) is not None
193
+ if label == "BANK_ROUTING_NUMBER":
194
+ return SORT_RE.match(value) is not None
195
+ if label == "ACCOUNT_NUMBER":
196
+ compact = re.sub(r"[\s-]+", "", value)
197
+ return is_plausible_ie_iban(value) or (compact.isdigit() and len(compact) == 8)
198
+ if label == "CREDIT_DEBIT_CARD":
199
+ return is_plausible_card(value)
200
+ if label == "SWIFT_BIC":
201
+ return BIC_RE.match(value.upper()) is not None
202
+ if label == "POSTCODE":
203
+ return EIRCODE_RE.match(value) is not None and is_valid_eircode(value)
204
+ return True
205
+
206
+
207
+ def label_ids_from_mapping(id2label, label: str):
208
+ target = label.upper()
209
+ ids = []
210
+ for raw_id, raw_label in id2label.items():
211
+ if normalize_label(str(raw_label)) == target:
212
+ ids.append(int(raw_id))
213
+ return ids
214
+
215
+
216
+ def label_ids(model, label: str):
217
+ return label_ids_from_mapping(model.config.id2label, label)
218
+
219
+
220
+ def word_scores_for_label(text: str, model, tokenizer, label: str):
221
+ pieces = tokenize_with_spans(text)
222
+ if not pieces:
223
+ return pieces, []
224
+ words = [word for word, _, _ in pieces]
225
+ encoded = tokenizer(words, is_split_into_words=True, return_tensors="pt", truncation=True)
226
+ word_ids = encoded.word_ids(batch_index=0)
227
+ device = next(model.parameters()).device
228
+ encoded = {key: value.to(device) for key, value in encoded.items()}
229
+ with torch.no_grad():
230
+ logits = model(**encoded).logits[0]
231
+ probs = torch.softmax(logits, dim=-1)
232
+ ids = label_ids(model, label)
233
+ scores = []
234
+ for word_index in range(len(pieces)):
235
+ score = 0.0
236
+ for token_index, wid in enumerate(word_ids):
237
+ if wid != word_index:
238
+ continue
239
+ for label_id in ids:
240
+ score = max(score, float(probs[token_index, label_id]))
241
+ scores.append(score)
242
+ return pieces, scores
243
+
244
+
245
+ def word_scores_for_label_onnx(text: str, session, tokenizer, config, label: str):
246
+ from onnx_token_classifier import _run_onnx, _softmax
247
+
248
+ pieces = tokenize_with_spans(text)
249
+ if not pieces:
250
+ return pieces, []
251
+ words = [word for word, _, _ in pieces]
252
+ encoded = tokenizer(words, is_split_into_words=True, return_tensors="np", truncation=True)
253
+ word_ids = encoded.word_ids(batch_index=0)
254
+ logits = _run_onnx(session, encoded)[0]
255
+ probs = _softmax(logits, axis=-1)
256
+ ids = label_ids_from_mapping(config.id2label, label)
257
+ scores = []
258
+ for word_index in range(len(pieces)):
259
+ score = 0.0
260
+ for token_index, wid in enumerate(word_ids):
261
+ if wid != word_index:
262
+ continue
263
+ for label_id in ids:
264
+ score = max(score, float(probs[token_index, label_id]))
265
+ scores.append(score)
266
+ return pieces, scores
267
+
268
+
269
+ def _word_aligned_label_spans_from_scores(text: str, label: str, threshold: float, pieces, scores):
270
+ spans = []
271
+ active = None
272
+ for (word, start, end), score in zip(pieces, scores):
273
+ keep = score >= threshold
274
+ if label in {"PHONE_NUMBER", "BANK_ROUTING_NUMBER", "CREDIT_DEBIT_CARD"} and word in {"-", "/"}:
275
+ keep = active is not None and score >= threshold / 2.0
276
+ if keep:
277
+ if active is None:
278
+ active = {"start": start, "end": end, "label": label}
279
+ else:
280
+ if start - active["end"] <= 1:
281
+ active["end"] = end
282
+ else:
283
+ spans.append(active)
284
+ active = {"start": start, "end": end, "label": label}
285
+ elif active is not None:
286
+ spans.append(active)
287
+ active = None
288
+ if active is not None:
289
+ spans.append(active)
290
+ out = []
291
+ for span in spans:
292
+ value = text[span["start"] : span["end"]]
293
+ if plausible_label_text(label, value):
294
+ out.append(
295
+ {
296
+ "label": label,
297
+ "start": span["start"],
298
+ "end": span["end"],
299
+ "text": value,
300
+ }
301
+ )
302
+ return out
303
+
304
+
305
+ def word_aligned_label_spans(
306
+ text: str,
307
+ model,
308
+ tokenizer,
309
+ label: str,
310
+ threshold: float,
311
+ ):
312
+ pieces, scores = word_scores_for_label(text, model, tokenizer, label)
313
+ return _word_aligned_label_spans_from_scores(text, label, threshold, pieces, scores)
314
+
315
+
316
+ def word_aligned_label_spans_onnx(
317
+ text: str,
318
+ session,
319
+ tokenizer,
320
+ config,
321
+ label: str,
322
+ threshold: float,
323
+ ):
324
+ pieces, scores = word_scores_for_label_onnx(text, session, tokenizer, config, label)
325
+ return _word_aligned_label_spans_from_scores(text, label, threshold, pieces, scores)
326
+
327
+
328
+ def regex_guided_label_spans(text: str, label: str, threshold: float, pieces, scores):
329
+ if not pieces:
330
+ return []
331
+ out = []
332
+ for candidate in regex_candidates_for_label(text, label):
333
+ start = int(candidate["start"])
334
+ end = int(candidate["end"])
335
+ while start < end and text[start].isspace():
336
+ start += 1
337
+ while end > start and text[end - 1].isspace():
338
+ end -= 1
339
+ support = 0.0
340
+ for (_, piece_start, piece_end), score in zip(pieces, scores):
341
+ if piece_end <= start or piece_start >= end:
342
+ continue
343
+ support = max(support, float(score))
344
+ value = text[start:end]
345
+ if support >= threshold and plausible_label_text(label, value):
346
+ out.append(
347
+ {
348
+ "label": label,
349
+ "start": start,
350
+ "end": end,
351
+ "text": value,
352
+ "score": support,
353
+ }
354
+ )
355
+ return out
356
+
357
+
358
+ def pipeline_to_spans(text: str, outputs: list[dict], min_score: float):
359
+ spans = []
360
+ for output in outputs:
361
+ label = normalize_label(output.get("entity_group") or output.get("entity") or "")
362
+ if not label:
363
+ continue
364
+ score = float(output.get("score", 0.0))
365
+ if score < min_score:
366
+ continue
367
+ spans.append(
368
+ {
369
+ "label": label,
370
+ "start": int(output["start"]),
371
+ "end": int(output["end"]),
372
+ "score": score,
373
+ "text": text[int(output["start"]) : int(output["end"])],
374
+ }
375
+ )
376
+ return spans
377
+
378
+
379
+ def overlaps(a: dict, b: dict) -> bool:
380
+ return not (a["end"] <= b["start"] or b["end"] <= a["start"])
381
+
382
+
383
+ def span_length(span: dict) -> int:
384
+ return int(span["end"]) - int(span["start"])
385
+
386
+
387
+ def normalize_simple_span(span: dict):
388
+ label = normalize_label(span["label"])
389
+ value = span["text"]
390
+ if label == "PHONE_NUMBER" and plausible_label_text("CREDIT_DEBIT_CARD", value):
391
+ label = "CREDIT_DEBIT_CARD"
392
+ if label in FORMAT_LABELS or label == "POSTCODE":
393
+ if not plausible_label_text(label, value):
394
+ return None
395
+ return {
396
+ "label": label,
397
+ "start": int(span["start"]),
398
+ "end": int(span["end"]),
399
+ "score": float(span.get("score", 0.0)),
400
+ "text": value,
401
+ }
402
+
403
+
404
+ def dedupe_and_sort(spans: list[dict]):
405
+ ordered = sorted(
406
+ spans,
407
+ key=lambda span: (
408
+ int(span["start"]),
409
+ -span_length(span),
410
+ OUTPUT_PRIORITY.get(str(span["label"]).upper(), 99),
411
+ ),
412
+ )
413
+ kept = []
414
+ for span in ordered:
415
+ if any(overlaps(span, other) for other in kept):
416
+ continue
417
+ kept.append(span)
418
+ return kept
419
+
420
+
421
+ def repair_irish_core_spans(
422
+ text: str,
423
+ model,
424
+ tokenizer,
425
+ general_outputs: list[dict],
426
+ other_min_score: float,
427
+ ppsn_min_score: float,
428
+ label_thresholds: dict[str, float] | None = None,
429
+ ):
430
+ thresholds = dict(DEFAULT_LABEL_THRESHOLDS)
431
+ if label_thresholds:
432
+ thresholds.update({key.upper(): value for key, value in label_thresholds.items()})
433
+
434
+ spans = []
435
+ for span in pipeline_to_spans(text, general_outputs, min_score=other_min_score):
436
+ normalized = normalize_simple_span(span)
437
+ if normalized is not None and normalized["label"] != "PPSN":
438
+ spans.append(normalized)
+
+    ppsn_spans = word_aligned_ppsn_spans(text, model, tokenizer, threshold=ppsn_min_score)
+    for span in ppsn_spans:
+        value = text[int(span["start"]) : int(span["end"])]
+        if plausible_label_text("PPSN", value):
+            spans.append(
+                {
+                    "label": "PPSN",
+                    "start": int(span["start"]),
+                    "end": int(span["end"]),
+                    "score": float(span.get("score", 0.0)),
+                    "text": value,
+                }
+            )
+
+    repairs = []
+    ppsn_pieces, ppsn_scores = word_scores_for_label(text, model, tokenizer, "PPSN")
+    repairs.extend(regex_guided_label_spans(text, "PPSN", ppsn_min_score, ppsn_pieces, ppsn_scores))
+    for label, threshold in thresholds.items():
+        pieces, scores = word_scores_for_label(text, model, tokenizer, label)
+        repairs.extend(_word_aligned_label_spans_from_scores(text, label, threshold, pieces, scores))
+        repairs.extend(regex_guided_label_spans(text, label, threshold, pieces, scores))
+
+    # Merge repair candidates into the decoded spans: a candidate evicts any
+    # overlapping span it beats (same label, or both format labels, and longer),
+    # and is kept only if it evicted something or overlaps nothing.
+    for candidate in repairs:
+        updated = []
+        replaced = False
+        for span in spans:
+            if not overlaps(candidate, span):
+                updated.append(span)
+                continue
+            if candidate["label"] == span["label"] and span_length(candidate) > span_length(span):
+                replaced = True
+                continue
+            if candidate["label"] in FORMAT_LABELS and span["label"] in FORMAT_LABELS and span_length(candidate) > span_length(span):
+                replaced = True
+                continue
+            updated.append(span)
+        spans = updated
+        if replaced or not any(overlaps(candidate, span) for span in spans):
+            spans.append(candidate)
+
+    return dedupe_and_sort(spans)
+
+
+ def repair_irish_core_spans_onnx(
+     text: str,
+     session,
+     tokenizer,
+     config,
+     other_min_score: float,
+     ppsn_min_score: float,
+     label_thresholds: dict[str, float] | None = None,
+     general_outputs: list[dict] | None = None,
+ ):
+     from onnx_token_classifier import simple_aggregate_spans_onnx, word_aligned_ppsn_spans_onnx
+
+     thresholds = dict(DEFAULT_LABEL_THRESHOLDS)
+     if label_thresholds:
+         thresholds.update({key.upper(): value for key, value in label_thresholds.items()})
+
+     spans = []
+     if general_outputs is None:
+         general_outputs = simple_aggregate_spans_onnx(
+             text,
+             session,
+             tokenizer,
+             config,
+             min_score=other_min_score,
+         )
+     for span in pipeline_to_spans(text, general_outputs, min_score=other_min_score):
+         normalized = normalize_simple_span(span)
+         if normalized is not None and normalized["label"] != "PPSN":
+             spans.append(normalized)
+
+     ppsn_spans = word_aligned_ppsn_spans_onnx(text, session, tokenizer, config, threshold=ppsn_min_score)
+     for span in ppsn_spans:
+         value = text[int(span["start"]) : int(span["end"])]
+         if plausible_label_text("PPSN", value):
+             spans.append(
+                 {
+                     "label": "PPSN",
+                     "start": int(span["start"]),
+                     "end": int(span["end"]),
+                     "score": float(span.get("score", 0.0)),
+                     "text": value,
+                 }
+             )
+
+     repairs = []
+     ppsn_pieces, ppsn_scores = word_scores_for_label_onnx(text, session, tokenizer, config, "PPSN")
+     repairs.extend(regex_guided_label_spans(text, "PPSN", ppsn_min_score, ppsn_pieces, ppsn_scores))
+     for label, threshold in thresholds.items():
+         pieces, scores = word_scores_for_label_onnx(text, session, tokenizer, config, label)
+         repairs.extend(_word_aligned_label_spans_from_scores(text, label, threshold, pieces, scores))
+         repairs.extend(regex_guided_label_spans(text, label, threshold, pieces, scores))
+
+     # Same candidate-merge policy as the full-checkpoint decoder above.
+     for candidate in repairs:
+         updated = []
+         replaced = False
+         for span in spans:
+             if not overlaps(candidate, span):
+                 updated.append(span)
+                 continue
+             if candidate["label"] == span["label"] and span_length(candidate) > span_length(span):
+                 replaced = True
+                 continue
+             if candidate["label"] in FORMAT_LABELS and span["label"] in FORMAT_LABELS and span_length(candidate) > span_length(span):
+                 replaced = True
+                 continue
+             updated.append(span)
+         spans = updated
+         if replaced or not any(overlaps(candidate, span) for span in spans):
+             spans.append(candidate)
+
+     return dedupe_and_sort(spans)
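
For orientation, a minimal end-to-end sketch of the q8 repair decoder above. The repo id and thresholds come from qa_config.json; the decoder's import path is an assumption (qa_config.json names inference_mask_onnx.py as the q8 script), so adjust it to wherever repair_irish_core_spans_onnx actually lives:

    from onnx_token_classifier import load_onnx_token_classifier
    from inference_mask_onnx import repair_irish_core_spans_onnx  # assumed module path

    session, tokenizer, config, onnx_path = load_onnx_token_classifier(
        "temsa/OpenMed-mLiteClinical-IrishCorePII-135M-v2-rc6",
        onnx_file="onnx/model_quantized.onnx",
    )
    spans = repair_irish_core_spans_onnx(
        "My PPSN is 1234567T and my sort code is 90-00-17.",
        session,
        tokenizer,
        config,
        other_min_score=0.5,
        ppsn_min_score=0.55,
    )
    for span in spans:
        print(span["label"], span["start"], span["end"], span["score"])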
label_meta.json ADDED
@@ -0,0 +1,131 @@
+ {
+   "base_model": "models/openmed-mliteclinical-irish-core-v21_overlaprepair_lite_full_s120",
+   "label_list": [
+     "O",
+     "B-account_number",
+     "B-age",
+     "B-api_key",
+     "B-bank_routing_number",
+     "B-biometric_identifier",
+     "B-blood_type",
+     "B-certificate_license_number",
+     "B-city",
+     "B-company_name",
+     "B-coordinate",
+     "B-country",
+     "B-county",
+     "B-credit_debit_card",
+     "B-customer_id",
+     "B-cvv",
+     "B-date",
+     "B-date_of_birth",
+     "B-date_time",
+     "B-device_identifier",
+     "B-education_level",
+     "B-email",
+     "B-employee_id",
+     "B-employment_status",
+     "B-fax_number",
+     "B-first_name",
+     "B-gender",
+     "B-health_plan_beneficiary_number",
+     "B-http_cookie",
+     "B-ipv4",
+     "B-ipv6",
+     "B-language",
+     "B-last_name",
+     "B-license_plate",
+     "B-mac_address",
+     "B-medical_record_number",
+     "B-occupation",
+     "B-password",
+     "B-phone_number",
+     "B-pin",
+     "B-political_view",
+     "B-postcode",
+     "B-race_ethnicity",
+     "B-religious_belief",
+     "B-sexuality",
+     "B-ssn",
+     "B-state",
+     "B-street_address",
+     "B-swift_bic",
+     "B-tax_id",
+     "B-time",
+     "B-unique_id",
+     "B-url",
+     "B-user_name",
+     "B-vehicle_identifier",
+     "I-account_number",
+     "I-api_key",
+     "I-biometric_identifier",
+     "I-blood_type",
+     "I-certificate_license_number",
+     "I-city",
+     "I-company_name",
+     "I-coordinate",
+     "I-country",
+     "I-county",
+     "I-credit_debit_card",
+     "I-customer_id",
+     "I-date",
+     "I-date_of_birth",
+     "I-date_time",
+     "I-device_identifier",
+     "I-education_level",
+     "I-email",
+     "I-employee_id",
+     "I-employment_status",
+     "I-fax_number",
+     "I-first_name",
+     "I-gender",
+     "I-health_plan_beneficiary_number",
+     "I-http_cookie",
+     "I-ipv4",
+     "I-ipv6",
+     "I-language",
+     "I-last_name",
+     "I-license_plate",
+     "I-mac_address",
+     "I-medical_record_number",
+     "I-occupation",
+     "I-password",
+     "I-phone_number",
+     "I-pin",
+     "I-political_view",
+     "I-postcode",
+     "I-race_ethnicity",
+     "I-religious_belief",
+     "I-sexuality",
+     "I-ssn",
+     "I-state",
+     "I-street_address",
+     "I-swift_bic",
+     "I-tax_id",
+     "I-time",
+     "I-unique_id",
+     "I-url",
+     "I-user_name",
+     "I-vehicle_identifier",
+     "B-PPSN",
+     "I-PPSN",
+     "B-PASSPORT_NUMBER",
+     "I-PASSPORT_NUMBER",
+     "I-bank_routing_number"
+   ],
+   "num_labels": 111,
+   "target_label": "PPSN",
+   "extra_labels": [
+     "PPSN",
+     "PASSPORT_NUMBER",
+     "PHONE_NUMBER",
+     "POSTCODE",
+     "BANK_ROUTING_NUMBER",
+     "ACCOUNT_NUMBER",
+     "CREDIT_DEBIT_CARD",
+     "EMAIL",
+     "FIRST_NAME",
+     "LAST_NAME",
+     "SWIFT_BIC"
+   ]
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28bf5137f337a8712d5208abb848706ef434bd9ba37e852d7d2153b415d9d1de
+ size 539290124
onnx/config.json ADDED
@@ -0,0 +1,250 @@
+ {
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForTokenClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "dtype": "float32",
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "O",
+     "1": "B-account_number",
+     "2": "B-age",
+     "3": "B-api_key",
+     "4": "B-bank_routing_number",
+     "5": "B-biometric_identifier",
+     "6": "B-blood_type",
+     "7": "B-certificate_license_number",
+     "8": "B-city",
+     "9": "B-company_name",
+     "10": "B-coordinate",
+     "11": "B-country",
+     "12": "B-county",
+     "13": "B-credit_debit_card",
+     "14": "B-customer_id",
+     "15": "B-cvv",
+     "16": "B-date",
+     "17": "B-date_of_birth",
+     "18": "B-date_time",
+     "19": "B-device_identifier",
+     "20": "B-education_level",
+     "21": "B-email",
+     "22": "B-employee_id",
+     "23": "B-employment_status",
+     "24": "B-fax_number",
+     "25": "B-first_name",
+     "26": "B-gender",
+     "27": "B-health_plan_beneficiary_number",
+     "28": "B-http_cookie",
+     "29": "B-ipv4",
+     "30": "B-ipv6",
+     "31": "B-language",
+     "32": "B-last_name",
+     "33": "B-license_plate",
+     "34": "B-mac_address",
+     "35": "B-medical_record_number",
+     "36": "B-occupation",
+     "37": "B-password",
+     "38": "B-phone_number",
+     "39": "B-pin",
+     "40": "B-political_view",
+     "41": "B-postcode",
+     "42": "B-race_ethnicity",
+     "43": "B-religious_belief",
+     "44": "B-sexuality",
+     "45": "B-ssn",
+     "46": "B-state",
+     "47": "B-street_address",
+     "48": "B-swift_bic",
+     "49": "B-tax_id",
+     "50": "B-time",
+     "51": "B-unique_id",
+     "52": "B-url",
+     "53": "B-user_name",
+     "54": "B-vehicle_identifier",
+     "55": "I-account_number",
+     "56": "I-api_key",
+     "57": "I-biometric_identifier",
+     "58": "I-blood_type",
+     "59": "I-certificate_license_number",
+     "60": "I-city",
+     "61": "I-company_name",
+     "62": "I-coordinate",
+     "63": "I-country",
+     "64": "I-county",
+     "65": "I-credit_debit_card",
+     "66": "I-customer_id",
+     "67": "I-date",
+     "68": "I-date_of_birth",
+     "69": "I-date_time",
+     "70": "I-device_identifier",
+     "71": "I-education_level",
+     "72": "I-email",
+     "73": "I-employee_id",
+     "74": "I-employment_status",
+     "75": "I-fax_number",
+     "76": "I-first_name",
+     "77": "I-gender",
+     "78": "I-health_plan_beneficiary_number",
+     "79": "I-http_cookie",
+     "80": "I-ipv4",
+     "81": "I-ipv6",
+     "82": "I-language",
+     "83": "I-last_name",
+     "84": "I-license_plate",
+     "85": "I-mac_address",
+     "86": "I-medical_record_number",
+     "87": "I-occupation",
+     "88": "I-password",
+     "89": "I-phone_number",
+     "90": "I-pin",
+     "91": "I-political_view",
+     "92": "I-postcode",
+     "93": "I-race_ethnicity",
+     "94": "I-religious_belief",
+     "95": "I-sexuality",
+     "96": "I-ssn",
+     "97": "I-state",
+     "98": "I-street_address",
+     "99": "I-swift_bic",
+     "100": "I-tax_id",
+     "101": "I-time",
+     "102": "I-unique_id",
+     "103": "I-url",
+     "104": "I-user_name",
+     "105": "I-vehicle_identifier",
+     "106": "B-PPSN",
+     "107": "I-PPSN",
+     "108": "B-PASSPORT_NUMBER",
+     "109": "I-PASSPORT_NUMBER",
+     "110": "I-bank_routing_number"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "B-PASSPORT_NUMBER": 108,
+     "B-PPSN": 106,
+     "B-account_number": 1,
+     "B-age": 2,
+     "B-api_key": 3,
+     "B-bank_routing_number": 4,
+     "B-biometric_identifier": 5,
+     "B-blood_type": 6,
+     "B-certificate_license_number": 7,
+     "B-city": 8,
+     "B-company_name": 9,
+     "B-coordinate": 10,
+     "B-country": 11,
+     "B-county": 12,
+     "B-credit_debit_card": 13,
+     "B-customer_id": 14,
+     "B-cvv": 15,
+     "B-date": 16,
+     "B-date_of_birth": 17,
+     "B-date_time": 18,
+     "B-device_identifier": 19,
+     "B-education_level": 20,
+     "B-email": 21,
+     "B-employee_id": 22,
+     "B-employment_status": 23,
+     "B-fax_number": 24,
+     "B-first_name": 25,
+     "B-gender": 26,
+     "B-health_plan_beneficiary_number": 27,
+     "B-http_cookie": 28,
+     "B-ipv4": 29,
+     "B-ipv6": 30,
+     "B-language": 31,
+     "B-last_name": 32,
+     "B-license_plate": 33,
+     "B-mac_address": 34,
+     "B-medical_record_number": 35,
+     "B-occupation": 36,
+     "B-password": 37,
+     "B-phone_number": 38,
+     "B-pin": 39,
+     "B-political_view": 40,
+     "B-postcode": 41,
+     "B-race_ethnicity": 42,
+     "B-religious_belief": 43,
+     "B-sexuality": 44,
+     "B-ssn": 45,
+     "B-state": 46,
+     "B-street_address": 47,
+     "B-swift_bic": 48,
+     "B-tax_id": 49,
+     "B-time": 50,
+     "B-unique_id": 51,
+     "B-url": 52,
+     "B-user_name": 53,
+     "B-vehicle_identifier": 54,
+     "I-PASSPORT_NUMBER": 109,
+     "I-PPSN": 107,
+     "I-account_number": 55,
+     "I-api_key": 56,
+     "I-bank_routing_number": 110,
+     "I-biometric_identifier": 57,
+     "I-blood_type": 58,
+     "I-certificate_license_number": 59,
+     "I-city": 60,
+     "I-company_name": 61,
+     "I-coordinate": 62,
+     "I-country": 63,
+     "I-county": 64,
+     "I-credit_debit_card": 65,
+     "I-customer_id": 66,
+     "I-date": 67,
+     "I-date_of_birth": 68,
+     "I-date_time": 69,
+     "I-device_identifier": 70,
+     "I-education_level": 71,
+     "I-email": 72,
+     "I-employee_id": 73,
+     "I-employment_status": 74,
+     "I-fax_number": 75,
+     "I-first_name": 76,
+     "I-gender": 77,
+     "I-health_plan_beneficiary_number": 78,
+     "I-http_cookie": 79,
+     "I-ipv4": 80,
+     "I-ipv6": 81,
+     "I-language": 82,
+     "I-last_name": 83,
+     "I-license_plate": 84,
+     "I-mac_address": 85,
+     "I-medical_record_number": 86,
+     "I-occupation": 87,
+     "I-password": 88,
+     "I-phone_number": 89,
+     "I-pin": 90,
+     "I-political_view": 91,
+     "I-postcode": 92,
+     "I-race_ethnicity": 93,
+     "I-religious_belief": 94,
+     "I-sexuality": 95,
+     "I-ssn": 96,
+     "I-state": 97,
+     "I-street_address": 98,
+     "I-swift_bic": 99,
+     "I-tax_id": 100,
+     "I-time": 101,
+     "I-unique_id": 102,
+     "I-url": 103,
+     "I-user_name": 104,
+     "I-vehicle_identifier": 105,
+     "O": 0
+   },
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "output_past": true,
+   "pad_token_id": 0,
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "transformers_version": "4.57.6",
+   "vocab_size": 119547
+ }
onnx/label_meta.json ADDED
@@ -0,0 +1,131 @@
+ {
+   "base_model": "models/openmed-mliteclinical-irish-core-v21_overlaprepair_lite_full_s120",
+   "label_list": [
+     "O",
+     "B-account_number",
+     "B-age",
+     "B-api_key",
+     "B-bank_routing_number",
+     "B-biometric_identifier",
+     "B-blood_type",
+     "B-certificate_license_number",
+     "B-city",
+     "B-company_name",
+     "B-coordinate",
+     "B-country",
+     "B-county",
+     "B-credit_debit_card",
+     "B-customer_id",
+     "B-cvv",
+     "B-date",
+     "B-date_of_birth",
+     "B-date_time",
+     "B-device_identifier",
+     "B-education_level",
+     "B-email",
+     "B-employee_id",
+     "B-employment_status",
+     "B-fax_number",
+     "B-first_name",
+     "B-gender",
+     "B-health_plan_beneficiary_number",
+     "B-http_cookie",
+     "B-ipv4",
+     "B-ipv6",
+     "B-language",
+     "B-last_name",
+     "B-license_plate",
+     "B-mac_address",
+     "B-medical_record_number",
+     "B-occupation",
+     "B-password",
+     "B-phone_number",
+     "B-pin",
+     "B-political_view",
+     "B-postcode",
+     "B-race_ethnicity",
+     "B-religious_belief",
+     "B-sexuality",
+     "B-ssn",
+     "B-state",
+     "B-street_address",
+     "B-swift_bic",
+     "B-tax_id",
+     "B-time",
+     "B-unique_id",
+     "B-url",
+     "B-user_name",
+     "B-vehicle_identifier",
+     "I-account_number",
+     "I-api_key",
+     "I-biometric_identifier",
+     "I-blood_type",
+     "I-certificate_license_number",
+     "I-city",
+     "I-company_name",
+     "I-coordinate",
+     "I-country",
+     "I-county",
+     "I-credit_debit_card",
+     "I-customer_id",
+     "I-date",
+     "I-date_of_birth",
+     "I-date_time",
+     "I-device_identifier",
+     "I-education_level",
+     "I-email",
+     "I-employee_id",
+     "I-employment_status",
+     "I-fax_number",
+     "I-first_name",
+     "I-gender",
+     "I-health_plan_beneficiary_number",
+     "I-http_cookie",
+     "I-ipv4",
+     "I-ipv6",
+     "I-language",
+     "I-last_name",
+     "I-license_plate",
+     "I-mac_address",
+     "I-medical_record_number",
+     "I-occupation",
+     "I-password",
+     "I-phone_number",
+     "I-pin",
+     "I-political_view",
+     "I-postcode",
+     "I-race_ethnicity",
+     "I-religious_belief",
+     "I-sexuality",
+     "I-ssn",
+     "I-state",
+     "I-street_address",
+     "I-swift_bic",
+     "I-tax_id",
+     "I-time",
+     "I-unique_id",
+     "I-url",
+     "I-user_name",
+     "I-vehicle_identifier",
+     "B-PPSN",
+     "I-PPSN",
+     "B-PASSPORT_NUMBER",
+     "I-PASSPORT_NUMBER",
+     "I-bank_routing_number"
+   ],
+   "num_labels": 111,
+   "target_label": "PPSN",
+   "extra_labels": [
+     "PPSN",
+     "PASSPORT_NUMBER",
+     "PHONE_NUMBER",
+     "POSTCODE",
+     "BANK_ROUTING_NUMBER",
+     "ACCOUNT_NUMBER",
+     "CREDIT_DEBIT_CARD",
+     "EMAIL",
+     "FIRST_NAME",
+     "LAST_NAME",
+     "SWIFT_BIC"
+   ]
+ }
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fdb5aaf79c011096cbda3f322dcd54f089b79b1e818a54b2a7ed08f2a4d67bb
+ size 539431661
onnx/model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dfca716e6121e0fbcff98bc4236b306cd9dd02c0d4c9d2fa200ec45386602c0e
+ size 412032376
onnx/onnx_export.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "source_model": "temsa/OpenMed-mLiteClinical-IrishCorePII-135M-v2-rc5#onnx_full",
+   "onnx_path": "onnx/model.onnx",
+   "method": "exported_with_optimum",
+   "preferred_onnx_file": null,
+   "opset": 18,
+   "max_length": 256,
+   "copied_assets": [
+     "config.json",
+     "label_meta.json",
+     "special_tokens_map.json",
+     "tokenizer.json",
+     "tokenizer_config.json",
+     "vocab.txt"
+   ],
+   "external_data": false,
+   "task": "token-classification"
+ }
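
The export metadata above ("exported_with_optimum", opset 18) is not a runnable record, but under those settings the export corresponds roughly to this sketch of Optimum's programmatic API (source path illustrative; the actual source was the rc5 full checkpoint):

    from optimum.exporters.onnx import main_export

    main_export(
        model_name_or_path="path/to/full_checkpoint",  # illustrative
        output="onnx",
        task="token-classification",
        opset=18,
    )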
onnx/quantization.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "source_dir": "onnx/model.onnx",
+   "input_model": "models/openmed-mliteclinical-irish-core-v23_fprepair_full_s100_onnx_pp_q8_pc/model.onnx",
+   "output_model": "models/openmed-mliteclinical-irish-core-v23_fprepair_full_s100_onnx_pp_q8_pc/model_quantized.onnx",
+   "weight_type": "qint8",
+   "per_channel": true,
+   "reduce_range": false,
+   "op_types": [
+     "MatMul",
+     "Gemm",
+     "Attention"
+   ],
+   "copied_assets": [
+     "models/openmed-mliteclinical-irish-core-v23_fprepair_full_s100_onnx_pp/model.onnx",
+     "onnx_export.json",
+     "config.json",
+     "label_meta.json",
+     "special_tokens_map.json",
+     "tokenizer.json",
+     "tokenizer_config.json",
+     "vocab.txt"
+   ],
+   "format": "onnx_dynamic_quantized",
+   "task": "token-classification",
+   "source_model": "temsa/OpenMed-mLiteClinical-IrishCorePII-135M-v2-rc5#onnx_full"
+ }
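
Mapped onto ONNX Runtime's quantization API, the recorded recipe looks roughly like the following sketch (intermediate file name illustrative; "symbolic shape inference disabled" per the notes in training_sources.json):

    from onnxruntime.quantization import QuantType, quantize_dynamic
    from onnxruntime.quantization.shape_inference import quant_pre_process

    # Pre-process the fp32 export, skipping symbolic shape inference.
    quant_pre_process("onnx/model.onnx", "onnx/model_pp.onnx", skip_symbolic_shape=True)

    # Dynamic int8 weights, per channel, restricted to the recorded op types.
    quantize_dynamic(
        "onnx/model_pp.onnx",
        "onnx/model_quantized.onnx",
        weight_type=QuantType.QInt8,
        per_channel=True,
        reduce_range=False,
        op_types_to_quantize=["MatMul", "Gemm", "Attention"],
    )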
onnx/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
onnx/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
onnx/tokenizer_config.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "extra_special_tokens": {},
+   "fix_mistral_regex": true,
+   "mask_token": "[MASK]",
+   "max_length": 512,
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "stride": 0,
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "DistilBertTokenizer",
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "[UNK]"
+ }
onnx/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
onnx_token_classifier.py ADDED
@@ -0,0 +1,312 @@
+ #!/usr/bin/env python3
+ import json
+ import os
+ import tempfile
+ from pathlib import Path
+ from typing import Any
+
+ # Keep transformers from importing TF/Flax backends; this module is torch/numpy only.
+ os.environ.setdefault("TRANSFORMERS_NO_TF", "1")
+ os.environ.setdefault("TRANSFORMERS_NO_FLAX", "1")
+ os.environ.setdefault("TRANSFORMERS_NO_TORCHVISION", "1")
+ os.environ["USE_TF"] = "0"
+ os.environ["USE_FLAX"] = "0"
+ os.environ["USE_TORCH"] = "1"
+
+ import numpy as np
+ import regex as re
+ from huggingface_hub import HfApi, hf_hub_download
+ from transformers import AutoConfig, AutoTokenizer
+
+
+ TOKEN_RE = re.compile(r"[A-Za-z0-9]+|[^\w\s]", re.UNICODE)
+ DEFAULT_ONNX_FILES = [
+     "onnx/model_quantized.onnx",
+     "model_quantized.onnx",
+     "onnx/model.onnx",
+     "model.onnx",
+ ]
+ EIRCODE_RE = re.compile(r"^(?:[ACDEFHKNPRTVWXY]\d{2}|D6W)\s?[0-9ACDEFHKNPRTVWXY]{4}$", re.IGNORECASE)
+ TOKENIZER_FILES = [
+     "tokenizer_config.json",
+     "tokenizer.json",
+     "special_tokens_map.json",
+     "vocab.txt",
+     "vocab.json",
+     "merges.txt",
+     "added_tokens.json",
+     "sentencepiece.bpe.model",
+     "spiece.model",
+ ]
+
+
+ def tokenize_with_spans(text: str):
+     return [(m.group(0), m.start(), m.end()) for m in TOKEN_RE.finditer(text)]
+
+
+ def normalize_label(label: str) -> str:
+     label = (label or "").strip()
+     if label.startswith("B-") or label.startswith("I-"):
+         label = label[2:]
+     return label.upper()
+
+
+ def looks_like_eircode(value: str) -> bool:
+     return EIRCODE_RE.match(value.strip()) is not None
+
+
+ def _sanitize_tokenizer_dir(tokenizer_path: Path) -> str:
+     # Some transformers versions reject the "fix_mistral_regex" key shipped in
+     # tokenizer_config.json; copy the tokenizer files to a temp dir without it.
+     tokenizer_cfg_path = tokenizer_path / "tokenizer_config.json"
+     if not tokenizer_cfg_path.exists():
+         return str(tokenizer_path)
+     data = json.loads(tokenizer_cfg_path.read_text(encoding="utf-8"))
+     if "fix_mistral_regex" not in data:
+         return str(tokenizer_path)
+     tmpdir = Path(tempfile.mkdtemp(prefix="openmed_onnx_tokenizer_"))
+     keep = set(TOKENIZER_FILES)
+     for child in tokenizer_path.iterdir():
+         if child.is_file() and child.name in keep:
+             target = tmpdir / child.name
+             target.write_bytes(child.read_bytes())
+     data.pop("fix_mistral_regex", None)
+     (tmpdir / "tokenizer_config.json").write_text(
+         json.dumps(data, ensure_ascii=False, indent=2) + "\n",
+         encoding="utf-8",
+     )
+     return str(tmpdir)
+
+
+ def _materialize_remote_tokenizer(repo_id: str) -> str:
+     api = HfApi()
+     files = set(api.list_repo_files(repo_id=repo_id, repo_type="model"))
+     tmpdir = Path(tempfile.mkdtemp(prefix="openmed_remote_tokenizer_"))
+     copied = False
+     for name in TOKENIZER_FILES:
+         if name not in files:
+             continue
+         src = hf_hub_download(repo_id=repo_id, filename=name, repo_type="model")
+         (tmpdir / Path(name).name).write_bytes(Path(src).read_bytes())
+         copied = True
+     if not copied:
+         return repo_id
+     return _sanitize_tokenizer_dir(tmpdir)
+
+
+ def safe_auto_tokenizer(tokenizer_ref: str):
+     tokenizer_path = Path(tokenizer_ref)
+     if tokenizer_path.exists():
+         tokenizer_ref = _sanitize_tokenizer_dir(tokenizer_path)
+     else:
+         tokenizer_ref = _materialize_remote_tokenizer(tokenizer_ref)
+
+     # Fallback ladder: newest kwarg first, then without it, then the slow tokenizer.
+     try:
+         return AutoTokenizer.from_pretrained(tokenizer_ref, use_fast=True, fix_mistral_regex=True)
+     except Exception:
+         pass
+     try:
+         return AutoTokenizer.from_pretrained(tokenizer_ref, use_fast=True, fix_mistral_regex=False)
+     except TypeError:
+         pass
+     try:
+         return AutoTokenizer.from_pretrained(tokenizer_ref, use_fast=True)
+     except Exception:
+         return AutoTokenizer.from_pretrained(tokenizer_ref, use_fast=False)
+
+
+ def _load_tokenizer(tokenizer_ref: str):
+     return safe_auto_tokenizer(tokenizer_ref)
+
+
+ def _resolve_local_onnx(model_path: Path, preferred: str | None = None) -> Path:
+     candidates = ([preferred] if preferred else []) + DEFAULT_ONNX_FILES
+     for candidate in candidates:
+         if not candidate:
+             continue
+         path = model_path / candidate
+         if path.exists():
+             return path
+     raise FileNotFoundError(f"No ONNX file found under {model_path}")
+
+
+ def _resolve_remote_onnx(model_ref: str, preferred: str | None = None) -> Path:
+     api = HfApi()
+     files = set(api.list_repo_files(repo_id=model_ref, repo_type="model"))
+     candidates = ([preferred] if preferred else []) + DEFAULT_ONNX_FILES
+     for candidate in candidates:
+         if candidate and candidate in files:
+             return Path(hf_hub_download(repo_id=model_ref, filename=candidate, repo_type="model"))
+     raise FileNotFoundError(f"No ONNX file published for {model_ref}")
+
+
+ def load_onnx_token_classifier(
+     model_ref: str,
+     onnx_file: str | None = None,
+     providers: list[str] | None = None,
+ ):
+     import onnxruntime as ort
+
+     model_path = Path(model_ref)
+     if model_path.exists():
+         onnx_path = _resolve_local_onnx(model_path, preferred=onnx_file)
+     else:
+         onnx_path = _resolve_remote_onnx(model_ref, preferred=onnx_file)
+     config = AutoConfig.from_pretrained(model_ref)
+     tokenizer = safe_auto_tokenizer(model_ref)
+
+     session = ort.InferenceSession(str(onnx_path), providers=providers or ["CPUExecutionProvider"])
+     return session, tokenizer, config, onnx_path
+
+
+ def _run_onnx(session, encoded: dict[str, Any]) -> np.ndarray:
+     # Feed only the inputs the session declares; offset_mapping is
+     # tokenizer-side metadata, never a model input.
+     feed = {}
+     input_names = {item.name for item in session.get_inputs()}
+     for key, value in encoded.items():
+         if key == "offset_mapping":
+             continue
+         if key in input_names:
+             feed[key] = value
+     outputs = session.run(None, feed)
+     return outputs[0]
+
+
+ def _softmax(logits: np.ndarray, axis: int = -1) -> np.ndarray:
+     shifted = logits - np.max(logits, axis=axis, keepdims=True)
+     exp = np.exp(shifted)
+     return exp / np.clip(np.sum(exp, axis=axis, keepdims=True), 1e-12, None)
+
+
+ def _split_tag(label: str) -> tuple[str, str]:
+     if label.startswith("B-") or label.startswith("I-"):
+         return label[:1], label[2:]
+     return "B", label
+
+
+ def simple_aggregate_spans_onnx(
+     text: str,
+     session,
+     tokenizer,
+     config,
+     min_score: float = 0.5,
+ ) -> list[dict[str, Any]]:
+     encoded = tokenizer(text, return_offsets_mapping=True, return_tensors="np", truncation=True)
+     logits = _run_onnx(session, encoded)[0]
+     probs = _softmax(logits, axis=-1)
+     pred_ids = probs.argmax(axis=-1)
+     id2label = {int(k): v for k, v in config.id2label.items()}
+     offsets = encoded["offset_mapping"][0].tolist()
+     attention_mask = encoded.get("attention_mask")
+     if attention_mask is None:
+         attention = [1] * len(offsets)
+     else:
+         attention = attention_mask[0].tolist()
+
+     spans: list[dict[str, Any]] = []
+     active: dict[str, Any] | None = None
+     for idx, ((start, end), keep) in enumerate(zip(offsets, attention)):
+         if not keep or start == end:
+             if active is not None:
+                 spans.append(active)
+                 active = None
+             continue
+
+         label = id2label[int(pred_ids[idx])]
+         if label == "O":
+             if active is not None:
+                 spans.append(active)
+                 active = None
+             continue
+
+         score = float(probs[idx, int(pred_ids[idx])])
+         if score < min_score:
+             if active is not None:
+                 spans.append(active)
+                 active = None
+             continue
+
+         prefix, entity = _split_tag(label)
+         if (
+             active is None
+             or prefix == "B"
+             or entity != active["entity_group"]
+             or int(start) > int(active["end"]) + 1
+         ):
+             if active is not None:
+                 spans.append(active)
+             active = {
+                 "entity_group": entity,
+                 "start": int(start),
+                 "end": int(end),
+                 "score": score,
+             }
+         else:
+             active["end"] = int(end)
+             active["score"] = max(float(active["score"]), score)
+
+     if active is not None:
+         spans.append(active)
+
+     for span in spans:
+         span["word"] = text[span["start"] : span["end"]]
+     return spans
+
+
+ def ppsn_label_ids_from_config(config) -> list[int]:
+     ids = []
+     for raw_id, raw_label in config.id2label.items():
+         label_id = int(raw_id)
+         label = str(raw_label or "").strip()
+         if label.endswith("PPSN"):
+             ids.append(label_id)
+     return sorted(ids)
+
+
+ def word_aligned_ppsn_spans_onnx(
+     text: str,
+     session,
+     tokenizer,
+     config,
+     threshold: float = 0.4,
+ ) -> list[dict[str, Any]]:
+     pieces = tokenize_with_spans(text)
+     if not pieces:
+         return []
+
+     words = [word for word, _, _ in pieces]
+     encoded = tokenizer(words, is_split_into_words=True, return_tensors="np", truncation=True)
+     word_ids = encoded.word_ids(batch_index=0)
+     logits = _run_onnx(session, encoded)[0]
+     probs = _softmax(logits, axis=-1)
+     label_ids = ppsn_label_ids_from_config(config)
+
+     # Pool subword probabilities up to word level: a word scores the max over
+     # its subword tokens and over the B-/I-PPSN label ids.
+     word_scores: list[float] = []
+     for word_index in range(len(pieces)):
+         score = 0.0
+         for token_index, wid in enumerate(word_ids):
+             if wid != word_index:
+                 continue
+             for label_id in label_ids:
+                 score = max(score, float(probs[token_index, label_id]))
+         word_scores.append(score)
+
+     # Merge consecutive above-threshold words into character spans.
+     spans: list[dict[str, Any]] = []
+     active = None
+     for (_, start, end), score in zip(pieces, word_scores):
+         if score >= threshold:
+             if active is None:
+                 active = {"start": start, "end": end, "score": score}
+             else:
+                 active["end"] = end
+                 active["score"] = max(active["score"], score)
+         elif active is not None:
+             spans.append(active)
+             active = None
+
+     if active is not None:
+         spans.append(active)
+
+     for span in spans:
+         span["text"] = text[span["start"] : span["end"]]
+         span["label"] = "PPSN"
+         span["source"] = "onnx"
+     return spans
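
A quick smoke run of this module against one of the qa_config.json texts, assuming the published rc6 repo id:

    from onnx_token_classifier import (
        load_onnx_token_classifier,
        simple_aggregate_spans_onnx,
        word_aligned_ppsn_spans_onnx,
    )

    session, tokenizer, config, _ = load_onnx_token_classifier(
        "temsa/OpenMed-mLiteClinical-IrishCorePII-135M-v2-rc6"
    )
    text = "Is e mo upsp na 1234567tw agus teastaionn uaim eolas faoi liuntas curamora."
    print(word_aligned_ppsn_spans_onnx(text, session, tokenizer, config, threshold=0.55))
    print(simple_aggregate_spans_onnx(text, session, tokenizer, config, min_score=0.5))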
ppsn.py ADDED
@@ -0,0 +1,131 @@
+ #!/usr/bin/env python3
+ import argparse
+ import random
+ import re
+ from typing import Iterator, Dict, Any
+
+ # PPSN format: 7 digits + check letter + optional suffix letter.
+ # The check letter uses a weighted sum of the digits (weights 8..2) plus the
+ # suffix value (A=1..Z=26, W/blank=0) * 9. Mod 23: remainder 0 => W,
+ # remainder 1..22 => A..V.
+ # Publicly documented common suffixes are blank / W for legacy numbers, A or B
+ # for post-2013 individual numbers, and H for certain non-individual registrations.
+
+ COMPACT_RE = re.compile(r"^\d{7}[A-W][A-Z]?$", re.IGNORECASE)
+ SEPARATORS_RE = re.compile(r"[\s\-\./\u00A0]+")
+ CANDIDATE_RE = re.compile(
+     r"\b\d{7}(?:[\s\-\./\u00A0]*[A-Wa-w])(?:[\s\-\./\u00A0]*[A-Za-z])?\b"
+ )
+
+ # Strict mode keeps the commonly documented suffixes seen in public PPSN guidance.
+ COMMON_SUFFIXES = ["", "W", "A", "B", "H"]
+ EXTENDED_PUBLIC_SUFFIXES = ["", "W"] + [chr(c) for c in range(ord("A"), ord("Z") + 1) if chr(c) != "W"]
+ DEFAULT_SUFFIX_SET_STRICT = set(COMMON_SUFFIXES)
+
+
+ def normalize(value: str) -> str:
+     value = value.strip().upper()
+     value = SEPARATORS_RE.sub("", value)
+     return value
+
+
+ def _suffix_value(letter: str) -> int:
+     if not letter or letter == "W":
+         return 0
+     return ord(letter) - ord("A") + 1
+
+
+ def checksum_letter(digits: str, suffix: str = "") -> str:
+     if len(digits) != 7 or not digits.isdigit():
+         raise ValueError("digits must be 7 numeric characters")
+     weights = [8, 7, 6, 5, 4, 3, 2]
+     total = sum(int(d) * w for d, w in zip(digits, weights))
+     total += _suffix_value(suffix) * 9
+     remainder = total % 23
+     if remainder == 0:
+         return "W"
+     return chr(ord("A") + remainder - 1)
+
+
+ def is_valid_ppsn(value: str, strict_suffix: bool = False) -> bool:
+     compact = normalize(value)
+     if not COMPACT_RE.match(compact):
+         return False
+     digits = compact[:7]
+     check_letter = compact[7]
+     suffix = compact[8:] if len(compact) > 8 else ""
+     if strict_suffix and suffix not in DEFAULT_SUFFIX_SET_STRICT:
+         return False
+     return checksum_letter(digits, suffix) == check_letter
+
+
+ def is_plausible_ppsn(value: str) -> bool:
+     return COMPACT_RE.match(normalize(value)) is not None
+
+
+ def iter_ppsn_candidates(text: str) -> Iterator[Dict[str, Any]]:
+     for m in CANDIDATE_RE.finditer(text):
+         raw = m.group(0)
+         yield {
+             "start": m.start(),
+             "end": m.end(),
+             "text": raw,
+             "normalized": normalize(raw),
+         }
+
+
+ def generate_ppsn(suffix_policy: str = "mixed") -> str:
+     digits = "".join(str(random.randint(0, 9)) for _ in range(7))
+     if suffix_policy == "none":
+         suffix = ""
+     elif suffix_policy == "legacy":
+         suffix = random.choice(["", "", "W"])
+     elif suffix_policy == "modern":
+         suffix = random.choice(["A", "A", "B", "H"])
+     elif suffix_policy in {"mixed", "official_common", "spec"}:
+         suffix = random.choice(["", "", "", "A", "A", "B", "H", "W"])
+     elif suffix_policy in {"broad", "official_extended"}:
+         weighted = ["", "", "W", "A", "A", "B", "H"] + EXTENDED_PUBLIC_SUFFIXES
+         suffix = random.choice(weighted)
+     else:
+         raise ValueError("invalid suffix_policy")
+
+     check = checksum_letter(digits, suffix)
+     return f"{digits}{check}{suffix}"
+
+
+ def corrupt_ppsn(value: str) -> str:
+     """Create an invalid near-miss PPSN (wrong checksum or length)."""
+     compact = normalize(value)
+     if len(compact) >= 8 and compact[:7].isdigit():
+         # Flip the final digit of the numeric block so the check letter no longer matches.
+         d = list(compact)
+         d[6] = str((int(d[6]) + random.randint(1, 9)) % 10)
+         return "".join(d)
+     # Fallback: a random string that cannot pass the format check.
+     return "".join(str(random.randint(0, 9)) for _ in range(6)) + "ZZ"
+
+
+ def main() -> None:
+     parser = argparse.ArgumentParser()
+     sub = parser.add_subparsers(dest="cmd", required=True)
+
+     v = sub.add_parser("validate")
+     v.add_argument("--value", required=True)
+     v.add_argument("--strict-suffix", action="store_true")
+
+     g = sub.add_parser("generate")
+     g.add_argument("--count", type=int, default=10)
+     g.add_argument("--suffix-policy", default="mixed")
+
+     args = parser.parse_args()
+
+     if args.cmd == "validate":
+         ok = is_valid_ppsn(args.value, strict_suffix=args.strict_suffix)
+         print("valid" if ok else "invalid")
+     elif args.cmd == "generate":
+         for _ in range(args.count):
+             print(generate_ppsn(args.suffix_policy))
+
+
+ if __name__ == "__main__":
+     main()
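
The check-letter arithmetic is easy to verify against the 1234567T value used in qa_config.json: the weighted digit sum is 1*8 + 2*7 + 3*6 + 4*5 + 5*4 + 6*3 + 7*2 = 112, a blank (or W) suffix contributes 0, and 112 mod 23 = 20, which maps to the twentieth letter, T. As a quick check:

    from ppsn import checksum_letter, is_valid_ppsn

    assert checksum_letter("1234567", "") == "T"
    assert is_valid_ppsn("1234567T")
    assert is_valid_ppsn("1234567tw")     # W suffix adds 0, so the check letter is unchanged
    assert not is_valid_ppsn("1234567A")  # wrong check letter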
pyproject.toml ADDED
@@ -0,0 +1,18 @@
+ [project]
+ name = "openmed-irish-core-pii"
+ version = "0.2.0rc6"
+ description = "OpenMed Irish core PII token-classification release"
+ requires-python = ">=3.10"
+ readme = "README.md"
+ license = { text = "Apache-2.0" }
+ dependencies = [
+     "transformers>=4.41.0",
+     "torch",
+     "numpy>=1.26.0",
+     "regex>=2024.5.15",
+     "onnxruntime>=1.20.0",
+     "huggingface_hub>=0.36.0",
+ ]
+
+ [tool.uv]
+ package = false
qa_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "repo_id": "temsa/OpenMed-mLiteClinical-IrishCorePII-135M-v2-rc6",
+   "recommended_inference": {
+     "full_checkpoint": {
+       "script": "inference_mask.py",
+       "ppsn_decoder": "word_aligned",
+       "general_decoder": "irish_core_label_aware",
+       "ppsn_min_score": 0.55,
+       "other_min_score": 0.5,
+       "device": "cpu"
+     },
+     "onnx_q8": {
+       "script": "inference_mask_onnx.py",
+       "onnx_artifact": "onnx/model_quantized.onnx",
+       "ppsn_decoder": "word_aligned",
+       "general_decoder": "irish_core_label_aware",
+       "ppsn_min_score": 0.55,
+       "other_min_score": 0.5,
+       "device": "cpu"
+     }
+   },
+   "smoke_texts": [
+     "Duradh liom mo uimhir 1234567T a sholatar agus me ag denamh iarratais.",
+     "Is e mo upsp na 1234567tw agus teastaionn uaim eolas faoi liuntas curamora.",
+     "My PPSN is 1234567T and my sort code is 90-00-17.",
+     "Please provide your passport: NN5123456 and call me on 0851234567.",
+     "My IBAN is IE29AIBK93115212345678 and my email is aidan.oiarraidh@example.ie."
+   ],
+   "known_limit_texts": [
+     "Passport PA 1234567 was used to board the flight.",
+     "Card 4242 4242 4242 4242 in very short contexts should still be QA tested.",
+     "Compact mobile numbers in very short mixed-numeric contexts should still be QA tested."
+   ]
+ }
raw_word_aligned.py ADDED
@@ -0,0 +1,72 @@
+ #!/usr/bin/env python3
+ import regex as re
+ import torch
+
+
+ TOKEN_RE = re.compile(r"[A-Za-z0-9]+|[^\w\s]", re.UNICODE)
+
+
+ def tokenize_with_spans(text: str):
+     return [(m.group(0), m.start(), m.end()) for m in TOKEN_RE.finditer(text)]
+
+
+ def ppsn_label_ids(model) -> list[int]:
+     ids = []
+     for raw_id, raw_label in model.config.id2label.items():
+         label_id = int(raw_id)
+         label = str(raw_label or "").strip()
+         if label.endswith("PPSN"):
+             ids.append(label_id)
+     return sorted(ids)
+
+
+ def word_aligned_ppsn_spans(text: str, model, tokenizer, threshold: float) -> list[dict]:
+     pieces = tokenize_with_spans(text)
+     if not pieces:
+         return []
+
+     words = [word for word, _, _ in pieces]
+     encoded = tokenizer(words, is_split_into_words=True, return_tensors="pt", truncation=True)
+     word_ids = encoded.word_ids(batch_index=0)
+
+     device = next(model.parameters()).device
+     encoded = {k: v.to(device) for k, v in encoded.items()}
+
+     with torch.no_grad():
+         logits = model(**encoded).logits[0]
+
+     probs = torch.softmax(logits, dim=-1)
+     label_ids = ppsn_label_ids(model)
+
+     # Pool subword probabilities up to word level: a word scores the max over
+     # its subword tokens and over the B-/I-PPSN label ids.
+     word_scores: list[float] = []
+     for word_index in range(len(pieces)):
+         score = 0.0
+         for token_index, wid in enumerate(word_ids):
+             if wid != word_index:
+                 continue
+             for label_id in label_ids:
+                 score = max(score, float(probs[token_index, label_id]))
+         word_scores.append(score)
+
+     # Merge consecutive above-threshold words into character spans.
+     spans: list[dict] = []
+     active = None
+     for (_, start, end), score in zip(pieces, word_scores):
+         if score >= threshold:
+             if active is None:
+                 active = {"start": start, "end": end, "score": score}
+             else:
+                 active["end"] = end
+                 active["score"] = max(active["score"], score)
+         elif active is not None:
+             spans.append(active)
+             active = None
+
+     if active is not None:
+         spans.append(active)
+
+     for span in spans:
+         span["text"] = text[span["start"] : span["end"]]
+         span["label"] = "PPSN"
+         span["source"] = "model"
+
+     return spans
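
The matching full-checkpoint smoke run, as a sketch assuming the rc6 repo id (safe_auto_tokenizer from onnx_token_classifier.py sidesteps transformers versions that reject the fix_mistral_regex key in tokenizer_config.json):

    from transformers import AutoModelForTokenClassification

    from onnx_token_classifier import safe_auto_tokenizer
    from raw_word_aligned import word_aligned_ppsn_spans

    repo = "temsa/OpenMed-mLiteClinical-IrishCorePII-135M-v2-rc6"
    model = AutoModelForTokenClassification.from_pretrained(repo)
    tokenizer = safe_auto_tokenizer(repo)
    print(word_aligned_ppsn_spans("My PPSN is 1234567T.", model, tokenizer, threshold=0.55))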
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "extra_special_tokens": {},
+   "fix_mistral_regex": true,
+   "mask_token": "[MASK]",
+   "max_length": 512,
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "stride": 0,
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "DistilBertTokenizer",
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "[UNK]"
+ }
training_sources.json ADDED
@@ -0,0 +1,65 @@
+ {
+   "base_model": "OpenMed/OpenMed-PII-mLiteClinical-Base-135M-v1",
+   "previous_public_candidate": "temsa/OpenMed-mLiteClinical-IrishCorePII-135M-v2-rc5",
+   "stable_public_reference": "temsa/OpenMed-mLiteClinical-IrishCorePII-135M-v1",
+   "release_purpose": "Sixth v2 release candidate for the IrishCorePII line. rc6 builds on the rc5 checkpoint and inference defaults and adds label-aware decoder repair improvements for both the full checkpoint and the bundled dynamic q8 artifact.",
+   "recommended_thresholds": {
+     "full_checkpoint": {
+       "ppsn_min_score": 0.55,
+       "other_min_score": 0.5
+     },
+     "onnx_q8": {
+       "ppsn_min_score": 0.55,
+       "other_min_score": 0.5
+     }
+   },
+   "inference_stack_notes": [
+     "PPSN extraction uses the word-aligned decoder.",
+     "General Irish core PII extraction uses the label-aware repair decoder for both full and ONNX q8 inference.",
+     "The ONNX q8 artifact is built from a preprocessed ONNX export and then dynamically quantized with qint8 per-channel quantization over MatMul, Gemm, Attention."
+   ],
+   "training_mix_summary": [
+     {
+       "component": "same fine-tuned full checkpoint weights as rc5",
+       "weight": 1.0
+     },
+     {
+       "component": "updated inference calibration for weak-context Gaelic PPSN recovery",
+       "weight": 1.0
+     },
+     {
+       "component": "preprocessed ONNX export before dynamic q8 quantization",
+       "weight": 1.0
+     }
+   ],
+   "upstream_attribution": [
+     {
+       "name": "temsa/OpenMed-Irish-PPSN-Eircode-Spec-v1",
+       "license": "Apache-2.0"
+     },
+     {
+       "name": "temsa/OpenMed-Irish-CorePII-TrainMix-v1",
+       "license": "CC-BY-4.0 composite"
+     },
+     {
+       "name": "joelniklaus/mapa",
+       "license": "CC-BY-4.0"
+     },
+     {
+       "name": "gretelai/synthetic_pii_finance_multilingual",
+       "license": "Apache-2.0"
+     }
+   ],
+   "quantization_notes": {
+     "promoted_q8_recipe": "ONNX Runtime dynamic int8 qint8 per-channel quantization over MatMul, Gemm, Attention after ONNX pre-processing with symbolic shape inference disabled.",
+     "rejected_q8_recipes": [
+       "non-per-channel dynamic int8",
+       "MatMul/Gemm-only dynamic int8 without ONNX pre-processing"
+     ]
+   },
+   "known_limitations": [
+     "The full checkpoint still outperforms q8 on the finance-boundary suite.",
+     "The strict remaining-gap suite is still weaker on q8 than on the full checkpoint.",
+     "Grouped credit/debit card boundary cases remain the main shared weakness."
+   ]
+ }
vocab.txt ADDED
The diff for this file is too large to render. See raw diff