temsa committed on
Commit 58f9459 · verified
1 Parent(s): f872abc

Release IrishCore-DiffMask-135M-v1-rc1


Publish the packaged DiffMask release candidate with full checkpoint, ONNX q8 artifact, benchmark summaries, and model card.

.gitattributes CHANGED
@@ -1,35 +1,5 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
  *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
  *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
  *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,3 @@
+ __pycache__/
+ *.pyc
+ .venv/
LICENSE ADDED
@@ -0,0 +1,73 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
10
+
11
+ "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
12
+
13
+ "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
14
+
15
+ "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
16
+
17
+ "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
18
+
19
+ "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
20
+
21
+ "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
22
+
23
+ "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
24
+
25
+ "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
26
+
27
+ "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
28
+
29
+ 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
30
+
31
+ 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
32
+
33
+ 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
34
+
35
+ (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
36
+
37
+ (b) You must cause any modified files to carry prominent notices stating that You changed the files; and
38
+
39
+ (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
40
+
41
+ (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
42
+
43
+ You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
44
+
45
+ 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
46
+
47
+ 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
48
+
49
+ 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
50
+
51
+ 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
52
+
53
+ 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
54
+
55
+ END OF TERMS AND CONDITIONS
56
+
57
+ APPENDIX: How to apply the Apache License to your work.
58
+
59
+ To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
60
+
61
+ Copyright [yyyy] [name of copyright owner]
62
+
63
+ Licensed under the Apache License, Version 2.0 (the "License");
64
+ you may not use this file except in compliance with the License.
65
+ You may obtain a copy of the License at
66
+
67
+ http://www.apache.org/licenses/LICENSE-2.0
68
+
69
+ Unless required by applicable law or agreed to in writing, software
70
+ distributed under the License is distributed on an "AS IS" BASIS,
71
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
72
+ See the License for the specific language governing permissions and
73
+ limitations under the License.
NOTICE ADDED
@@ -0,0 +1,13 @@
+ IrishCore-DiffMask-135M-v1-rc1
+
+ This release is derived from:
+ - OpenMed/OpenMed-PII-mLiteClinical-Base-135M-v1 (Apache-2.0)
+
+ Training and evaluation data used for this derivative included:
+ - temsa/OpenMed-Irish-CorePII-TrainMix-v1
+ - temsa/OpenMed-Irish-PPSN-Eircode-Spec-v1
+ - joelniklaus/mapa (CC-BY-4.0)
+ - gretelai/synthetic_pii_finance_multilingual (Apache-2.0)
+
+ Additional local synthetic hardening and replay sets were used during model selection.
+ Please review upstream licenses and dataset cards before redistributing derivative datasets.
README.md ADDED
@@ -0,0 +1,218 @@
+ ---
+ language:
+ - en
+ - ga
+ license: apache-2.0
+ library_name: transformers
+ pipeline_tag: token-classification
+ tags:
+ - pii
+ - de-identification
+ - token-classification
+ - ireland
+ - irish
+ - gaelic
+ - diffusion-style
+ - denoising
+ - ppsn
+ - eircode
+ - onnx
+ - int8
+ - dynamic-quantization
+ - cpu
+ base_model:
+ - OpenMed/OpenMed-PII-mLiteClinical-Base-135M-v1
+ datasets:
+ - temsa/OpenMed-Irish-CorePII-TrainMix-v1
+ - temsa/OpenMed-Irish-PPSN-Eircode-Spec-v1
+ - joelniklaus/mapa
+ - gretelai/synthetic_pii_finance_multilingual
+ model-index:
+ - name: IrishCore-DiffMask-135M-v1-rc1
+   results:
+   - task:
+       type: token-classification
+       name: Irish core PII masking
+     dataset:
+       type: custom
+       name: irish_core_pii_v1
+     metrics:
+     - type: f1
+       name: Overall F1
+       value: 0.9934
+   - task:
+       type: token-classification
+       name: Multilingual PPSN masking
+     dataset:
+       type: custom
+       name: multilingual_ppsn_v1_all
+     metrics:
+     - type: f1
+       name: Overall F1
+       value: 0.9412
+   - task:
+       type: token-classification
+       name: Hardening exact suite
+     dataset:
+       type: custom
+       name: irish_dllm_hardening_exact_v1
+     metrics:
+     - type: f1
+       name: Overall F1
+       value: 0.9744
+ ---
+
+ # IrishCore-DiffMask-135M-v1-rc1
+
+ `IrishCore-DiffMask-135M-v1-rc1` is a raw-only Irish PII masking model derived from `OpenMed/OpenMed-PII-mLiteClinical-Base-135M-v1`.
+
+ It is a small, scanner-free span extractor tuned for:
+
+ - `PPSN`
+ - `ACCOUNT_NUMBER`
+ - `BANK_ROUTING_NUMBER`
+ - `CREDIT_DEBIT_CARD`
+ - `PASSPORT_NUMBER`
+ - `POSTCODE`
+ - `PHONE_NUMBER`
+ - `EMAIL`
+ - `FIRST_NAME`
+ - `LAST_NAME`
+ - `SWIFT_BIC`
+
+ The main target is English plus Irish Gaelic text in citizen-support, public-sector, and HSE-style flows. The repo ships both the full `transformers` checkpoint and a dynamic q8 ONNX artifact for CPU deployment.
+
+ ## What "DiffMask" Means Here
+
+ This release is not a generative diffusion language model. It is a compact discriminative token-span model trained with a diffusion-style denoising schedule.
+
+ Concretely:
+
+ - The encoder starts from the DistilBERT-family weights inside `OpenMed/OpenMed-PII-mLiteClinical-Base-135M-v1`.
+ - The model adds three task heads over the encoder hidden states:
+   - a per-label token-presence head
+   - a typed start-boundary head
+   - a typed end-boundary head
+ - During training, each input sentence is corrupted multiple times by replacing a random fraction of visible tokens with `[MASK]`.
+ - The corruption level follows a short noise schedule from heavy masking to light masking.
+ - The same gold spans are learned at every noise level, and the losses are averaged across the denoising passes.
+ - At inference time there is no diffusion loop and no rewrite step: the model runs once and a score-only span decoder reconstructs spans from token scores plus typed boundaries.
+
+ So the "DLLM" aspect here is the training recipe: repeated masked denoising over text, not any form of text generation at inference. A minimal sketch of the corruption schedule follows below.
+
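+ The fractions and pass count below are illustrative assumptions, not published training hyperparameters; the sketch only shows the shape of the heavy-to-light masking recipe described above.
+
+ ```python
+ import random
+
+ MASK_TOKEN = "[MASK]"
+ # Assumed heavy-to-light schedule, for illustration only.
+ NOISE_SCHEDULE = [0.5, 0.3, 0.15, 0.05]
+
+
+ def corrupt(tokens: list[str], mask_fraction: float) -> list[str]:
+     # Replace a random fraction of visible tokens with [MASK]; the gold
+     # span labels stay attached to the same positions in every pass.
+     out = list(tokens)
+     n_mask = int(round(mask_fraction * len(out)))
+     for idx in random.sample(range(len(out)), k=n_mask):
+         out[idx] = MASK_TOKEN
+     return out
+
+
+ def denoising_views(tokens: list[str]) -> list[list[str]]:
+     # One corrupted copy per noise level; span losses are averaged over these.
+     return [corrupt(tokens, fraction) for fraction in NOISE_SCHEDULE]
+ ```
+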
+ ## Architecture
+
+ - Encoder: DistilBERT-size encoder from the OpenMed mLiteClinical 135M base
+ - Heads:
+   - token presence per released label
+   - typed start boundary per released label
+   - typed end boundary per released label
+ - Decoder:
+   - score-only span decoding from offsets, token continuity, label-specific thresholds, and typed boundaries
+   - no regex candidate extractor
+   - no checksum validator
+   - no scanner layer
+
+ The release behavior is fully defined by the weights plus the bundled decoder in `common.py`; a toy sketch of the head layout follows below.
+
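+ As an illustration only (the release heads live in `model.py` and `multitask_model.py`), the layout is three per-label linear heads over the encoder states; the hidden size of 768 and the 11 released span labels come from `config.json`:
+
+ ```python
+ import torch
+ from torch import nn
+
+
+ class SpanHeads(nn.Module):
+     # Illustrative sketch of the three task heads, not the release module.
+     def __init__(self, hidden_dim: int = 768, num_labels: int = 11):
+         super().__init__()
+         self.token_presence = nn.Linear(hidden_dim, num_labels)
+         self.start_boundary = nn.Linear(hidden_dim, num_labels)
+         self.end_boundary = nn.Linear(hidden_dim, num_labels)
+
+     def forward(self, hidden_states: torch.Tensor):
+         # hidden_states: (batch, seq_len, hidden_dim) from the DistilBERT encoder.
+         return (
+             self.token_presence(hidden_states).sigmoid(),
+             self.start_boundary(hidden_states).sigmoid(),
+             self.end_boundary(hidden_states).sigmoid(),
+         )
+ ```
+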
+ ## Why This Exists
+
+ The older `rc5` release still depended on a repair-oriented decoder stack. The public `rc8` release removed that external logic, but it regressed on several structured Irish identifiers. This release keeps the raw-only deployment shape while re-hardening the model on Irish numeric and mixed-PII cases.
+
+ ## References
+
+ Direct implementation references:
+
+ - Devlin et al., *BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding*
+   https://arxiv.org/abs/1810.04805
+ - Sanh et al., *DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter*
+   https://arxiv.org/abs/1910.01108
+ - Zhu and Li, *Boundary Smoothing for Named Entity Recognition*
+   https://aclanthology.org/2022.acl-long.490/
+ - Fu et al., *SpanNER: Named Entity Re-/Recognition as Span Prediction*
+   https://aclanthology.org/2021.acl-long.558/
+
+ Conceptual diffusion-style training references:
+
+ - Nie et al., *LLaDA 2.0: Scaling Up Diffusion Language Models to 100B*
+   https://arxiv.org/abs/2512.15745
+ - Gong et al., *Scaling Diffusion Language Models via Adaptation from Autoregressive Models*
+   https://arxiv.org/abs/2410.17891
+
+ These diffusion papers served as conceptual inspiration for the masked noising schedule. This release does **not** implement a generative text diffusion runtime.
+
144
+ ## Included Artifacts
145
+
146
+ - Full `transformers` checkpoint in the repo root
147
+ - Dynamic q8 ONNX export in `onnx/model_quantized.onnx`
148
+ - Unquantized ONNX export in `onnx/model.onnx`
149
+ - `inference_mask.py` for the full checkpoint
150
+ - `inference_mask_onnx.py` for the ONNX q8 path
151
+ - `common.py`, `model.py`, and `multitask_model.py` implementing the release decoder
152
+ - benchmark files in `eval/`
153
+
154
+ Artifact sizes:
155
+
156
+ - Full checkpoint: `514 MB` (`model.safetensors`)
157
+ - Dynamic q8 ONNX: `393 MB` (`onnx/model_quantized.onnx`)
158
+
+ ## How To Use It
+
+ Full checkpoint:
+
+ ```bash
+ uv run python inference_mask.py \
+   --model temsa/IrishCore-DiffMask-135M-v1-rc1 \
+   --min-score 0.5 \
+   --text "My PPSN is 1234567TW, my Eircode is D02 X285, and my phone is 087 123 4567." \
+   --json
+ ```
+
+ Dynamic q8 ONNX:
+
+ ```bash
+ uv run python inference_mask_onnx.py \
+   --model temsa/IrishCore-DiffMask-135M-v1-rc1 \
+   --min-score 0.5 \
+   --text "Please provide your passport NN5123456 and call me on 0851234567." \
+   --json
+ ```
+
+ Both scripts emit explicit placeholders like `[PII:PPSN]` in `masked_text`.
+
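+ The scripts build `masked_text` internally; the standalone sketch below, with a hypothetical `apply_placeholders` helper that is not part of the release, only illustrates the placeholder convention:
+
+ ```python
+ def apply_placeholders(text: str, spans: list[dict]) -> str:
+     # Replace detected spans with [PII:LABEL] placeholders, working right to
+     # left so earlier character offsets stay valid.
+     masked = text
+     for span in sorted(spans, key=lambda s: s["start"], reverse=True):
+         masked = masked[: span["start"]] + f"[PII:{span['label']}]" + masked[span["end"] :]
+     return masked
+
+
+ spans = [{"label": "PPSN", "start": 11, "end": 20}]
+ print(apply_placeholders("My PPSN is 1234567TW.", spans))
+ # -> My PPSN is [PII:PPSN].
+ ```
+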
+ ## Q8 Comparison
+
+ Deployment-relevant comparison on CPU:
+
+ | Model | Core F1 | Edge F1 | Finance F1 | Finance-boundary F1 | User PPSN F1 | GA weak PPSN F1 | Multilingual PPSN F1 | Hardening F1 |
+ |---|---:|---:|---:|---:|---:|---:|---:|---:|
+ | `rc5` ONNX q8 | 0.9669 | 0.9744 | 0.9362 | 0.8750 | 1.0000 | 1.0000 | 0.9333 | - |
+ | `rc8` ONNX q8 | 0.9737 | 1.0000 | 1.0000 | 1.0000 | 1.0000 | 1.0000 | 0.9176 | 0.7059 |
+ | `IrishCore-DiffMask-135M-v1-rc1` ONNX q8 | 0.9934 | 1.0000 | 1.0000 | 1.0000 | 1.0000 | 1.0000 | 0.9412 | 0.9744 |
+
+ CPU throughput references:
+
+ | Suite | `rc5` q8 | `rc8` q8 | `IrishCore-DiffMask-135M-v1-rc1` q8 |
+ |---|---:|---:|---:|
+ | Irish core short-text path | 33.6193 ex/s | 257.3756 ex/s | 251.2358 ex/s |
+ | Multilingual PPSN short-text path | 35.5561 ex/s | 230.5181 ex/s | 256.0768 ex/s |
+ | Runtime profile source | 23.8338 ex/s | 179.4708 ex/s | 184.2930 ex/s |
+
+ Notes:
+
+ - The `rc5` speed references come from its published q8 end-to-end inference stack, which includes its older repair decoder.
+ - The `rc8` and `IrishCore-DiffMask-135M-v1-rc1` numbers use the same raw-only token-span ONNX path.
+ - A weight-only q4 ONNX experiment was also tried during development, but it was slower than q8 on this CPU and is not shipped; a typical dynamic q8 export recipe is sketched below.
+
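+ The exact export tooling is not documented in this repo; as an assumption, a typical dynamic int8 recipe with `onnxruntime` that would turn `onnx/model.onnx` into something like `onnx/model_quantized.onnx` looks like this:
+
+ ```python
+ from onnxruntime.quantization import QuantType, quantize_dynamic
+
+ # Weight-only dynamic quantization: weights are stored as int8 and
+ # activations are quantized on the fly, so no calibration data is needed.
+ quantize_dynamic(
+     model_input="onnx/model.onnx",
+     model_output="onnx/model_quantized.onnx",
+     weight_type=QuantType.QInt8,
+ )
+ ```
+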
+ ## Limits
+
+ - This is still a compact model. The hardest remaining errors are multilingual PPSN near-miss cases rather than Irish core numeric formats.
+ - The release path is intentionally scanner-free. If you need deterministic validation of individual identifier types, add that in your application layer.
+ - If you rely on release behavior, use the bundled inference scripts or import `decode_token_presence_segments` from `common.py`; a minimal end-to-end sketch follows below.
+
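+ A minimal end-to-end sketch against the q8 artifact using only helpers bundled in `common.py`. The ONNX output ordering assumed here (token presence, typed start, typed end) and the assumption that the graph emits probabilities are not confirmed by this card; `inference_mask_onnx.py` remains the authoritative path.
+
+ ```python
+ from common import (
+     decode_token_presence_segments,
+     label_names_from_config,
+     load_onnx_session,
+     run_onnx_all,
+     token_label_thresholds_from_config,
+ )
+
+ session, tokenizer, config = load_onnx_session("temsa/IrishCore-DiffMask-135M-v1-rc1")
+ text = "My PPSN is 1234567TW."
+ encoded = tokenizer(text, return_offsets_mapping=True, return_tensors="np")
+
+ # Assumed output order: [token_presence, start_boundary, end_boundary].
+ outputs = run_onnx_all(session, dict(encoded))
+ token_scores, start_scores, end_scores = outputs[0][0], outputs[1][0], outputs[2][0]
+
+ spans = decode_token_presence_segments(
+     text,
+     [tuple(pair) for pair in encoded["offset_mapping"][0].tolist()],
+     token_scores,
+     label_names_from_config(config),
+     default_threshold=0.5,
+     label_thresholds=token_label_thresholds_from_config(config, 0.5),
+     start_scores=start_scores,
+     end_scores=end_scores,
+ )
+ print(spans)
+ ```
+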
+ ## License And Attribution
+
+ - Release license: Apache-2.0
+ - Base model: `OpenMed/OpenMed-PII-mLiteClinical-Base-135M-v1`
+ - The derivative release remains subject to the attribution terms of the upstream datasets listed above.
+ - See `NOTICE`, `training_sources.json`, and `eval/benchmark_summary.json` for provenance and benchmark details.
common.py ADDED
@@ -0,0 +1,443 @@
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import json
5
+ import tempfile
6
+ from pathlib import Path
7
+ from typing import Any
8
+
9
+ import numpy as np
10
+ from huggingface_hub import HfApi, hf_hub_download
11
+ from transformers import AutoConfig, AutoTokenizer
12
+
13
+ TOKENIZER_FILES = [
14
+ "tokenizer_config.json",
15
+ "tokenizer.json",
16
+ "special_tokens_map.json",
17
+ "vocab.txt",
18
+ "vocab.json",
19
+ "merges.txt",
20
+ "added_tokens.json",
21
+ "sentencepiece.bpe.model",
22
+ "spiece.model",
23
+ ]
24
+ DEFAULT_LABEL_MAX_SPAN_TOKENS = {
25
+ # Token-piece limits, not word limits. These need to reflect how the
26
+ # underlying tokenizer actually fragments compact identifiers.
27
+ "PPSN": 9,
28
+ "POSTCODE": 7,
29
+ "PHONE_NUMBER": 10,
30
+ "PASSPORT_NUMBER": 8,
31
+ "BANK_ROUTING_NUMBER": 5,
32
+ "ACCOUNT_NUMBER": 19,
33
+ "CREDIT_DEBIT_CARD": 12,
34
+ "SWIFT_BIC": 8,
35
+ "EMAIL": 15,
36
+ "FIRST_NAME": 5,
37
+ "LAST_NAME": 8,
38
+ }
39
+ DEFAULT_LABEL_MIN_NONSPACE_CHARS = {
40
+ "PPSN": 8,
41
+ "POSTCODE": 6,
42
+ "PHONE_NUMBER": 7,
43
+ "PASSPORT_NUMBER": 7,
44
+ "BANK_ROUTING_NUMBER": 6,
45
+ "ACCOUNT_NUMBER": 6,
46
+ "CREDIT_DEBIT_CARD": 12,
47
+ "SWIFT_BIC": 8,
48
+ "EMAIL": 6,
49
+ "FIRST_NAME": 2,
50
+ "LAST_NAME": 2,
51
+ }
52
+ WHITESPACE_BRIDGE_LABELS = {
53
+ "PPSN",
54
+ "POSTCODE",
55
+ "PHONE_NUMBER",
56
+ "PASSPORT_NUMBER",
57
+ "BANK_ROUTING_NUMBER",
58
+ "ACCOUNT_NUMBER",
59
+ "CREDIT_DEBIT_CARD",
60
+ "SWIFT_BIC",
61
+ "EMAIL",
62
+ }
63
+ CONSERVATIVE_BOUNDARY_REFINEMENT_LABELS = {
64
+ "PPSN",
65
+ "POSTCODE",
66
+ "PHONE_NUMBER",
67
+ "PASSPORT_NUMBER",
68
+ "BANK_ROUTING_NUMBER",
69
+ "ACCOUNT_NUMBER",
70
+ "CREDIT_DEBIT_CARD",
71
+ "SWIFT_BIC",
72
+ "EMAIL",
73
+ }
74
+ OUTPUT_PRIORITY = {
75
+ "PPSN": 0,
76
+ "PASSPORT_NUMBER": 1,
77
+ "ACCOUNT_NUMBER": 2,
78
+ "BANK_ROUTING_NUMBER": 3,
79
+ "CREDIT_DEBIT_CARD": 4,
80
+ "PHONE_NUMBER": 5,
81
+ "SWIFT_BIC": 6,
82
+ "POSTCODE": 7,
83
+ "EMAIL": 8,
84
+ "FIRST_NAME": 9,
85
+ "LAST_NAME": 10,
86
+ }
87
+
88
+
89
+ def normalize_entity_name(label: str) -> str:
90
+ label = (label or "").strip()
91
+ if label.startswith("B-") or label.startswith("I-"):
92
+ label = label[2:]
93
+ return label.upper()
94
+
95
+
96
+ def _sanitize_tokenizer_dir(tokenizer_path: Path) -> str:
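+ # If tokenizer_config.json carries a non-standard "fix_mistral_regex" entry,
+ # copy the tokenizer files into a temp dir with that entry removed so
+ # AutoTokenizer can load them cleanly; otherwise reuse the directory as-is.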
97
+ tokenizer_cfg_path = tokenizer_path / "tokenizer_config.json"
98
+ if not tokenizer_cfg_path.exists():
99
+ return str(tokenizer_path)
100
+ data = json.loads(tokenizer_cfg_path.read_text(encoding="utf-8"))
101
+ if "fix_mistral_regex" not in data:
102
+ return str(tokenizer_path)
103
+ tmpdir = Path(tempfile.mkdtemp(prefix="openmed_span_tokenizer_"))
104
+ keep = set(TOKENIZER_FILES)
105
+ for child in tokenizer_path.iterdir():
106
+ if child.is_file() and child.name in keep:
107
+ (tmpdir / child.name).write_bytes(child.read_bytes())
108
+ data.pop("fix_mistral_regex", None)
109
+ (tmpdir / "tokenizer_config.json").write_text(json.dumps(data, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
110
+ return str(tmpdir)
111
+
112
+
113
+ def safe_auto_tokenizer(tokenizer_ref: str):
114
+ tokenizer_path = Path(tokenizer_ref)
115
+ if tokenizer_path.exists():
116
+ tokenizer_ref = _sanitize_tokenizer_dir(tokenizer_path)
117
+ else:
118
+ api = HfApi()
119
+ files = set(api.list_repo_files(repo_id=tokenizer_ref, repo_type="model"))
120
+ tmpdir = Path(tempfile.mkdtemp(prefix="openmed_remote_span_tokenizer_"))
121
+ copied = False
122
+ for name in TOKENIZER_FILES:
123
+ if name not in files:
124
+ continue
125
+ src = hf_hub_download(repo_id=tokenizer_ref, filename=name, repo_type="model")
126
+ (tmpdir / Path(name).name).write_bytes(Path(src).read_bytes())
127
+ copied = True
128
+ if copied:
129
+ tokenizer_ref = _sanitize_tokenizer_dir(tmpdir)
130
+
131
+ try:
132
+ return AutoTokenizer.from_pretrained(tokenizer_ref, use_fast=True, fix_mistral_regex=True)
133
+ except Exception:
134
+ pass
135
+ try:
136
+ return AutoTokenizer.from_pretrained(tokenizer_ref, use_fast=True, fix_mistral_regex=False)
137
+ except TypeError:
138
+ pass
139
+ try:
140
+ return AutoTokenizer.from_pretrained(tokenizer_ref, use_fast=True)
141
+ except Exception:
142
+ return AutoTokenizer.from_pretrained(tokenizer_ref, use_fast=False)
143
+
144
+
145
+ def label_names_from_config(config) -> list[str]:
146
+ names = list(getattr(config, "span_label_names", []))
147
+ if not names:
148
+ raise ValueError("Missing span_label_names in config")
149
+ return [normalize_entity_name(name) for name in names]
150
+
151
+
152
+ def label_thresholds_from_config(config, default_threshold: float) -> dict[str, float]:
153
+ raw = getattr(config, "span_label_thresholds", None) or {}
154
+ out = {normalize_entity_name(key): float(value) for key, value in raw.items()}
155
+ for label in label_names_from_config(config):
156
+ out.setdefault(label, float(default_threshold))
157
+ return out
158
+
159
+
160
+ def token_label_thresholds_from_config(config, default_threshold: float) -> dict[str, float]:
161
+ raw = getattr(config, "token_label_thresholds", None) or {}
162
+ out = {normalize_entity_name(key): float(value) for key, value in raw.items()}
163
+ for label in label_names_from_config(config):
164
+ out.setdefault(label, float(default_threshold))
165
+ return out
166
+
167
+
168
+ def token_extend_thresholds_from_config(config, default_fraction: float = 0.6) -> dict[str, float]:
169
+ raw = getattr(config, "token_extend_thresholds", None) or {}
170
+ out = {normalize_entity_name(key): float(value) for key, value in raw.items()}
171
+ for label in label_names_from_config(config):
172
+ out.setdefault(label, max(0.0, min(1.0, float(token_label_thresholds_from_config(config, 0.5).get(label, 0.5)) * default_fraction)))
173
+ return out
174
+
175
+
176
+ def boundary_label_thresholds_from_config(config, default_threshold: float = 0.0) -> dict[str, float]:
177
+ raw = getattr(config, "boundary_label_thresholds", None) or {}
178
+ out = {normalize_entity_name(key): float(value) for key, value in raw.items()}
179
+ for label in label_names_from_config(config):
180
+ out.setdefault(label, float(default_threshold))
181
+ return out
182
+
183
+
184
+ def label_max_span_tokens_from_config(config) -> dict[str, int]:
185
+ raw = getattr(config, "span_label_max_span_tokens", None) or {}
186
+ out = {normalize_entity_name(key): int(value) for key, value in raw.items()}
187
+ for label, value in DEFAULT_LABEL_MAX_SPAN_TOKENS.items():
188
+ out.setdefault(label, value)
189
+ for label in label_names_from_config(config):
190
+ out.setdefault(label, 8)
191
+ return out
192
+
193
+
194
+ def label_min_nonspace_chars_from_config(config) -> dict[str, int]:
195
+ raw = getattr(config, "span_label_min_nonspace_chars", None) or {}
196
+ out = {normalize_entity_name(key): int(value) for key, value in raw.items()}
197
+ for label, value in DEFAULT_LABEL_MIN_NONSPACE_CHARS.items():
198
+ out.setdefault(label, value)
199
+ for label in label_names_from_config(config):
200
+ out.setdefault(label, 1)
201
+ return out
202
+
203
+
204
+ def overlaps(a: dict, b: dict) -> bool:
205
+ return not (a["end"] <= b["start"] or b["end"] <= a["start"])
206
+
207
+
208
+ def dedupe_spans(spans: list[dict]) -> list[dict]:
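+ # Keep the highest-scoring spans, drop any span that overlaps one already
+ # kept, then restore document order for output.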
209
+ ordered = sorted(
210
+ spans,
211
+ key=lambda item: (-float(item.get("score", 0.0)), item["start"], item["end"], OUTPUT_PRIORITY.get(item["label"], 99)),
212
+ )
213
+ kept = []
214
+ for span in ordered:
215
+ if any(overlaps(span, other) for other in kept):
216
+ continue
217
+ kept.append(span)
218
+ kept.sort(key=lambda item: (item["start"], item["end"], OUTPUT_PRIORITY.get(item["label"], 99)))
219
+ return kept
220
+
221
+
222
+ def _valid_offset(offset: tuple[int, int]) -> bool:
223
+ return bool(offset) and offset[1] > offset[0]
224
+
225
+
226
+ def _has_skippable_bridge(text: str, left: tuple[int, int], right: tuple[int, int], label: str) -> bool:
227
+ bridge = text[int(left[1]) : int(right[0])]
228
+ if bridge == "":
229
+ return True
230
+ return label in WHITESPACE_BRIDGE_LABELS and bridge.isspace()
231
+
232
+
233
+ def _has_left_extension_bridge(text: str, left: tuple[int, int], right: tuple[int, int]) -> bool:
234
+ bridge = text[int(left[1]) : int(right[0])]
235
+ return bridge == ""
236
+
237
+
238
+ def _nonspace_length(text: str, start: int, end: int) -> int:
239
+ return sum(0 if ch.isspace() else 1 for ch in text[int(start) : int(end)])
240
+
241
+
242
+ def decode_span_logits(
243
+ text: str,
244
+ offsets: list[tuple[int, int]],
245
+ start_scores: np.ndarray,
246
+ end_scores: np.ndarray,
247
+ label_names: list[str],
248
+ default_threshold: float,
249
+ label_thresholds: dict[str, float] | None = None,
250
+ label_max_span_tokens: dict[str, int] | None = None,
251
+ ) -> list[dict]:
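+ # For each label, pair start candidates above the label threshold with the
+ # best-scoring end candidate inside that label's token window, then keep
+ # non-overlapping winners via dedupe_spans.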
252
+ thresholds = {label: float(default_threshold) for label in label_names}
253
+ if label_thresholds:
254
+ thresholds.update({normalize_entity_name(key): float(value) for key, value in label_thresholds.items()})
255
+ max_tokens = dict(DEFAULT_LABEL_MAX_SPAN_TOKENS)
256
+ if label_max_span_tokens:
257
+ max_tokens.update({normalize_entity_name(key): int(value) for key, value in label_max_span_tokens.items()})
258
+
259
+ spans: list[dict] = []
260
+ for label_index, label in enumerate(label_names):
261
+ threshold = thresholds.get(label, float(default_threshold))
262
+ max_span = max_tokens.get(label, 8)
263
+ start_candidates = [idx for idx in range(len(offsets)) if _valid_offset(offsets[idx]) and float(start_scores[idx, label_index]) >= threshold]
264
+ for start_idx in start_candidates:
265
+ best = None
266
+ for end_idx in range(start_idx, min(len(offsets), start_idx + max_span)):
267
+ if not _valid_offset(offsets[end_idx]):
268
+ continue
269
+ end_score = float(end_scores[end_idx, label_index])
270
+ if end_score < threshold:
271
+ continue
272
+ score = min(float(start_scores[start_idx, label_index]), end_score)
273
+ if best is None or score > best["score"]:
274
+ best = {
275
+ "label": label,
276
+ "start": int(offsets[start_idx][0]),
277
+ "end": int(offsets[end_idx][1]),
278
+ "score": score,
279
+ }
280
+ if best is not None and best["start"] < best["end"]:
281
+ best["text"] = text[best["start"]:best["end"]]
282
+ spans.append(best)
283
+ return dedupe_spans(spans)
284
+
285
+
286
+ def decode_token_presence_segments(
287
+ text: str,
288
+ offsets: list[tuple[int, int]],
289
+ token_scores: np.ndarray,
290
+ label_names: list[str],
291
+ default_threshold: float,
292
+ label_thresholds: dict[str, float] | None = None,
293
+ label_extend_thresholds: dict[str, float] | None = None,
294
+ label_max_span_tokens: dict[str, int] | None = None,
295
+ label_min_nonspace_chars: dict[str, int] | None = None,
296
+ boundary_label_thresholds: dict[str, float] | None = None,
297
+ start_scores: np.ndarray | None = None,
298
+ end_scores: np.ndarray | None = None,
299
+ ) -> list[dict]:
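+ # Greedy per-label decoding: grow contiguous runs of tokens whose presence
+ # score clears the label threshold, bridge pure-whitespace gaps for the
+ # structured identifier labels, optionally refine edges with the typed
+ # start/end heads, and drop segments failing boundary or length checks.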
300
+ thresholds = {label: float(default_threshold) for label in label_names}
301
+ if label_thresholds:
302
+ thresholds.update({normalize_entity_name(key): float(value) for key, value in label_thresholds.items()})
303
+ extend_thresholds = {label: max(0.0, min(1.0, thresholds[label] * 0.6)) for label in label_names}
304
+ if label_extend_thresholds:
305
+ extend_thresholds.update({normalize_entity_name(key): float(value) for key, value in label_extend_thresholds.items()})
306
+ max_tokens = dict(DEFAULT_LABEL_MAX_SPAN_TOKENS)
307
+ if label_max_span_tokens:
308
+ max_tokens.update({normalize_entity_name(key): int(value) for key, value in label_max_span_tokens.items()})
309
+ min_nonspace_chars = dict(DEFAULT_LABEL_MIN_NONSPACE_CHARS)
310
+ if label_min_nonspace_chars:
311
+ min_nonspace_chars.update({normalize_entity_name(key): int(value) for key, value in label_min_nonspace_chars.items()})
312
+ boundary_thresholds = {label: 0.0 for label in label_names}
313
+ if boundary_label_thresholds:
314
+ boundary_thresholds.update({normalize_entity_name(key): float(value) for key, value in boundary_label_thresholds.items()})
315
+
316
+ spans: list[dict] = []
317
+ valid = [_valid_offset(offset) for offset in offsets]
318
+ num_tokens = len(offsets)
319
+ for label_index, label in enumerate(label_names):
320
+ threshold = thresholds.get(label, float(default_threshold))
321
+ extend_threshold = min(threshold, extend_thresholds.get(label, threshold))
322
+ max_span = max_tokens.get(label, 8)
323
+ idx = 0
324
+ while idx < num_tokens:
325
+ if not valid[idx] or float(token_scores[idx, label_index]) < threshold:
326
+ idx += 1
327
+ continue
328
+ start_idx = idx
329
+ end_idx = idx
330
+ while end_idx + 1 < num_tokens and valid[end_idx + 1] and float(token_scores[end_idx + 1, label_index]) >= threshold and (end_idx + 1 - start_idx + 1) <= max_span:
331
+ end_idx += 1
332
+ while (
333
+ start_idx - 1 >= 0
334
+ and valid[start_idx - 1]
335
+ and _has_left_extension_bridge(text, offsets[start_idx - 1], offsets[start_idx])
336
+ and float(token_scores[start_idx - 1, label_index]) >= extend_threshold
337
+ and (end_idx - (start_idx - 1) + 1) <= max_span
338
+ ):
339
+ start_idx -= 1
340
+ while (
341
+ end_idx + 1 < num_tokens
342
+ and valid[end_idx + 1]
343
+ and _has_skippable_bridge(text, offsets[end_idx], offsets[end_idx + 1], label)
344
+ and float(token_scores[end_idx + 1, label_index]) >= extend_threshold
345
+ and ((end_idx + 1) - start_idx + 1) <= max_span
346
+ ):
347
+ end_idx += 1
348
+ presence_slice = token_scores[start_idx : end_idx + 1, label_index]
349
+ score = float(presence_slice.mean())
350
+ out_start_idx = start_idx
351
+ out_end_idx = end_idx
352
+ if start_scores is not None and end_scores is not None:
353
+ refine_window = min(3, end_idx - start_idx + 1)
354
+ start_window = start_scores[start_idx : start_idx + refine_window, label_index]
355
+ best_start_rel = int(np.argmax(start_window))
356
+ best_start_idx = start_idx + best_start_rel
357
+ end_window_start = max(best_start_idx, end_idx - refine_window + 1)
358
+ end_window = end_scores[end_window_start : end_idx + 1, label_index]
359
+ best_end_rel = int(np.argmax(end_window))
360
+ best_end_idx = end_window_start + best_end_rel
361
+ if (
362
+ float(start_scores[best_start_idx, label_index]) < boundary_thresholds.get(label, 0.0)
363
+ or float(end_scores[best_end_idx, label_index]) < boundary_thresholds.get(label, 0.0)
364
+ ):
365
+ idx = end_idx + 1
366
+ continue
367
+ out_start_idx = best_start_idx
368
+ out_end_idx = best_end_idx
369
+ if label in CONSERVATIVE_BOUNDARY_REFINEMENT_LABELS and (
370
+ best_start_idx != start_idx or best_end_idx != end_idx
371
+ ):
372
+ outer_boundary = min(float(start_scores[start_idx, label_index]), float(end_scores[end_idx, label_index]))
373
+ refined_boundary = min(
374
+ float(start_scores[best_start_idx, label_index]),
375
+ float(end_scores[best_end_idx, label_index]),
376
+ )
377
+ if refined_boundary < outer_boundary + 0.08:
378
+ out_start_idx = start_idx
379
+ out_end_idx = end_idx
380
+ score = (
381
+ 0.65 * score
382
+ + 0.175 * float(start_scores[out_start_idx, label_index])
383
+ + 0.175 * float(end_scores[out_end_idx, label_index])
384
+ )
385
+ min_chars = int(min_nonspace_chars.get(label, 1))
386
+ if _nonspace_length(text, offsets[out_start_idx][0], offsets[out_end_idx][1]) < min_chars:
387
+ idx = end_idx + 1
388
+ continue
389
+ spans.append(
390
+ {
391
+ "label": label,
392
+ "start": int(offsets[out_start_idx][0]),
393
+ "end": int(offsets[out_end_idx][1]),
394
+ "score": score,
395
+ "text": text[int(offsets[out_start_idx][0]) : int(offsets[out_end_idx][1])],
396
+ }
397
+ )
398
+ idx = end_idx + 1
399
+ return dedupe_spans(spans)
400
+
401
+
402
+ def load_onnx_session(model_ref: str, onnx_file: str = "model_quantized.onnx", onnx_subfolder: str = "onnx"):
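+ # Resolve the ONNX file from a local path or the Hub, then return a CPU-only
+ # onnxruntime session together with the matching tokenizer and config.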
403
+ import onnxruntime as ort
404
+
405
+ model_path = Path(model_ref)
406
+ if model_path.exists():
407
+ candidates = []
408
+ if onnx_subfolder:
409
+ candidates.append(model_path / onnx_subfolder / onnx_file)
410
+ candidates.append(model_path / onnx_file)
411
+ onnx_path = next((path for path in candidates if path.exists()), candidates[0])
412
+ config = AutoConfig.from_pretrained(model_ref)
413
+ tokenizer = safe_auto_tokenizer(model_ref)
414
+ else:
415
+ remote_name = f"{onnx_subfolder}/{onnx_file}" if onnx_subfolder else onnx_file
416
+ onnx_path = Path(hf_hub_download(repo_id=model_ref, filename=remote_name, repo_type="model"))
417
+ config = AutoConfig.from_pretrained(model_ref)
418
+ tokenizer = safe_auto_tokenizer(model_ref)
419
+ session = ort.InferenceSession(str(onnx_path), providers=["CPUExecutionProvider"])
420
+ return session, tokenizer, config
421
+
422
+
423
+ def run_onnx(session, encoded: dict[str, Any]) -> tuple[np.ndarray, np.ndarray]:
424
+ feed = {}
425
+ input_names = {item.name for item in session.get_inputs()}
426
+ for key, value in encoded.items():
427
+ if key == "offset_mapping":
428
+ continue
429
+ if key in input_names:
430
+ feed[key] = value
431
+ outputs = session.run(None, feed)
432
+ return outputs[0], outputs[1]
433
+
434
+
435
+ def run_onnx_all(session, encoded: dict[str, Any]) -> list[np.ndarray]:
436
+ feed = {}
437
+ input_names = {item.name for item in session.get_inputs()}
438
+ for key, value in encoded.items():
439
+ if key == "offset_mapping":
440
+ continue
441
+ if key in input_names:
442
+ feed[key] = value
443
+ return session.run(None, feed)
config.json ADDED
@@ -0,0 +1,333 @@
1
+ {
2
+ "activation": "gelu",
3
+ "architectures": [
4
+ "IrishCoreTokenSpanModel"
5
+ ],
6
+ "attention_dropout": 0.1,
7
+ "boundary_label_thresholds": {
8
+ "ACCOUNT_NUMBER": 0.1,
9
+ "BANK_ROUTING_NUMBER": 0.25,
10
+ "CREDIT_DEBIT_CARD": 0.25,
11
+ "EMAIL": 0.2,
12
+ "FIRST_NAME": 0.1,
13
+ "LAST_NAME": 0.1,
14
+ "PASSPORT_NUMBER": 0.25,
15
+ "PHONE_NUMBER": 0.25,
16
+ "POSTCODE": 0.4,
17
+ "PPSN": 0.35,
18
+ "SWIFT_BIC": 0.25
19
+ },
20
+ "boundary_loss_weight": 1.0,
21
+ "dim": 768,
22
+ "dropout": 0.1,
23
+ "dtype": "float32",
24
+ "hidden_dim": 3072,
25
+ "id2label": {
26
+ "0": "O",
27
+ "1": "B-account_number",
28
+ "2": "B-age",
29
+ "3": "B-api_key",
30
+ "4": "B-bank_routing_number",
31
+ "5": "B-biometric_identifier",
32
+ "6": "B-blood_type",
33
+ "7": "B-certificate_license_number",
34
+ "8": "B-city",
35
+ "9": "B-company_name",
36
+ "10": "B-coordinate",
37
+ "11": "B-country",
38
+ "12": "B-county",
39
+ "13": "B-credit_debit_card",
40
+ "14": "B-customer_id",
41
+ "15": "B-cvv",
42
+ "16": "B-date",
43
+ "17": "B-date_of_birth",
44
+ "18": "B-date_time",
45
+ "19": "B-device_identifier",
46
+ "20": "B-education_level",
47
+ "21": "B-email",
48
+ "22": "B-employee_id",
49
+ "23": "B-employment_status",
50
+ "24": "B-fax_number",
51
+ "25": "B-first_name",
52
+ "26": "B-gender",
53
+ "27": "B-health_plan_beneficiary_number",
54
+ "28": "B-http_cookie",
55
+ "29": "B-ipv4",
56
+ "30": "B-ipv6",
57
+ "31": "B-language",
58
+ "32": "B-last_name",
59
+ "33": "B-license_plate",
60
+ "34": "B-mac_address",
61
+ "35": "B-medical_record_number",
62
+ "36": "B-occupation",
63
+ "37": "B-password",
64
+ "38": "B-phone_number",
65
+ "39": "B-pin",
66
+ "40": "B-political_view",
67
+ "41": "B-postcode",
68
+ "42": "B-race_ethnicity",
69
+ "43": "B-religious_belief",
70
+ "44": "B-sexuality",
71
+ "45": "B-ssn",
72
+ "46": "B-state",
73
+ "47": "B-street_address",
74
+ "48": "B-swift_bic",
75
+ "49": "B-tax_id",
76
+ "50": "B-time",
77
+ "51": "B-unique_id",
78
+ "52": "B-url",
79
+ "53": "B-user_name",
80
+ "54": "B-vehicle_identifier",
81
+ "55": "I-account_number",
82
+ "56": "I-api_key",
83
+ "57": "I-biometric_identifier",
84
+ "58": "I-blood_type",
85
+ "59": "I-certificate_license_number",
86
+ "60": "I-city",
87
+ "61": "I-company_name",
88
+ "62": "I-coordinate",
89
+ "63": "I-country",
90
+ "64": "I-county",
91
+ "65": "I-credit_debit_card",
92
+ "66": "I-customer_id",
93
+ "67": "I-date",
94
+ "68": "I-date_of_birth",
95
+ "69": "I-date_time",
96
+ "70": "I-device_identifier",
97
+ "71": "I-education_level",
98
+ "72": "I-email",
99
+ "73": "I-employee_id",
100
+ "74": "I-employment_status",
101
+ "75": "I-fax_number",
102
+ "76": "I-first_name",
103
+ "77": "I-gender",
104
+ "78": "I-health_plan_beneficiary_number",
105
+ "79": "I-http_cookie",
106
+ "80": "I-ipv4",
107
+ "81": "I-ipv6",
108
+ "82": "I-language",
109
+ "83": "I-last_name",
110
+ "84": "I-license_plate",
111
+ "85": "I-mac_address",
112
+ "86": "I-medical_record_number",
113
+ "87": "I-occupation",
114
+ "88": "I-password",
115
+ "89": "I-phone_number",
116
+ "90": "I-pin",
117
+ "91": "I-political_view",
118
+ "92": "I-postcode",
119
+ "93": "I-race_ethnicity",
120
+ "94": "I-religious_belief",
121
+ "95": "I-sexuality",
122
+ "96": "I-ssn",
123
+ "97": "I-state",
124
+ "98": "I-street_address",
125
+ "99": "I-swift_bic",
126
+ "100": "I-tax_id",
127
+ "101": "I-time",
128
+ "102": "I-unique_id",
129
+ "103": "I-url",
130
+ "104": "I-user_name",
131
+ "105": "I-vehicle_identifier",
132
+ "106": "B-PPSN",
133
+ "107": "I-PPSN",
134
+ "108": "B-PASSPORT_NUMBER",
135
+ "109": "I-PASSPORT_NUMBER",
136
+ "110": "I-bank_routing_number"
137
+ },
138
+ "initializer_range": 0.02,
139
+ "label2id": {
140
+ "B-PASSPORT_NUMBER": 108,
141
+ "B-PPSN": 106,
142
+ "B-account_number": 1,
143
+ "B-age": 2,
144
+ "B-api_key": 3,
145
+ "B-bank_routing_number": 4,
146
+ "B-biometric_identifier": 5,
147
+ "B-blood_type": 6,
148
+ "B-certificate_license_number": 7,
149
+ "B-city": 8,
150
+ "B-company_name": 9,
151
+ "B-coordinate": 10,
152
+ "B-country": 11,
153
+ "B-county": 12,
154
+ "B-credit_debit_card": 13,
155
+ "B-customer_id": 14,
156
+ "B-cvv": 15,
157
+ "B-date": 16,
158
+ "B-date_of_birth": 17,
159
+ "B-date_time": 18,
160
+ "B-device_identifier": 19,
161
+ "B-education_level": 20,
162
+ "B-email": 21,
163
+ "B-employee_id": 22,
164
+ "B-employment_status": 23,
165
+ "B-fax_number": 24,
166
+ "B-first_name": 25,
167
+ "B-gender": 26,
168
+ "B-health_plan_beneficiary_number": 27,
169
+ "B-http_cookie": 28,
170
+ "B-ipv4": 29,
171
+ "B-ipv6": 30,
172
+ "B-language": 31,
173
+ "B-last_name": 32,
174
+ "B-license_plate": 33,
175
+ "B-mac_address": 34,
176
+ "B-medical_record_number": 35,
177
+ "B-occupation": 36,
178
+ "B-password": 37,
179
+ "B-phone_number": 38,
180
+ "B-pin": 39,
181
+ "B-political_view": 40,
182
+ "B-postcode": 41,
183
+ "B-race_ethnicity": 42,
184
+ "B-religious_belief": 43,
185
+ "B-sexuality": 44,
186
+ "B-ssn": 45,
187
+ "B-state": 46,
188
+ "B-street_address": 47,
189
+ "B-swift_bic": 48,
190
+ "B-tax_id": 49,
191
+ "B-time": 50,
192
+ "B-unique_id": 51,
193
+ "B-url": 52,
194
+ "B-user_name": 53,
195
+ "B-vehicle_identifier": 54,
196
+ "I-PASSPORT_NUMBER": 109,
197
+ "I-PPSN": 107,
198
+ "I-account_number": 55,
199
+ "I-api_key": 56,
200
+ "I-bank_routing_number": 110,
201
+ "I-biometric_identifier": 57,
202
+ "I-blood_type": 58,
203
+ "I-certificate_license_number": 59,
204
+ "I-city": 60,
205
+ "I-company_name": 61,
206
+ "I-coordinate": 62,
207
+ "I-country": 63,
208
+ "I-county": 64,
209
+ "I-credit_debit_card": 65,
210
+ "I-customer_id": 66,
211
+ "I-date": 67,
212
+ "I-date_of_birth": 68,
213
+ "I-date_time": 69,
214
+ "I-device_identifier": 70,
215
+ "I-education_level": 71,
216
+ "I-email": 72,
217
+ "I-employee_id": 73,
218
+ "I-employment_status": 74,
219
+ "I-fax_number": 75,
220
+ "I-first_name": 76,
221
+ "I-gender": 77,
222
+ "I-health_plan_beneficiary_number": 78,
223
+ "I-http_cookie": 79,
224
+ "I-ipv4": 80,
225
+ "I-ipv6": 81,
226
+ "I-language": 82,
227
+ "I-last_name": 83,
228
+ "I-license_plate": 84,
229
+ "I-mac_address": 85,
230
+ "I-medical_record_number": 86,
231
+ "I-occupation": 87,
232
+ "I-password": 88,
233
+ "I-phone_number": 89,
234
+ "I-pin": 90,
235
+ "I-political_view": 91,
236
+ "I-postcode": 92,
237
+ "I-race_ethnicity": 93,
238
+ "I-religious_belief": 94,
239
+ "I-sexuality": 95,
240
+ "I-ssn": 96,
241
+ "I-state": 97,
242
+ "I-street_address": 98,
243
+ "I-swift_bic": 99,
244
+ "I-tax_id": 100,
245
+ "I-time": 101,
246
+ "I-unique_id": 102,
247
+ "I-url": 103,
248
+ "I-user_name": 104,
249
+ "I-vehicle_identifier": 105,
250
+ "O": 0
251
+ },
252
+ "max_position_embeddings": 512,
253
+ "model_type": "distilbert",
254
+ "n_heads": 12,
255
+ "n_layers": 6,
256
+ "num_span_labels": 11,
257
+ "output_past": true,
258
+ "pad_token_id": 0,
259
+ "qa_dropout": 0.1,
260
+ "seq_classif_dropout": 0.2,
261
+ "sinusoidal_pos_embds": false,
262
+ "span_label_max_span_tokens": {
263
+ "ACCOUNT_NUMBER": 19,
264
+ "BANK_ROUTING_NUMBER": 6,
265
+ "CREDIT_DEBIT_CARD": 13,
266
+ "EMAIL": 16,
267
+ "FIRST_NAME": 5,
268
+ "LAST_NAME": 8,
269
+ "PASSPORT_NUMBER": 9,
270
+ "PHONE_NUMBER": 10,
271
+ "POSTCODE": 8,
272
+ "PPSN": 9,
273
+ "SWIFT_BIC": 8
274
+ },
275
+ "span_label_names": [
276
+ "ACCOUNT_NUMBER",
277
+ "BANK_ROUTING_NUMBER",
278
+ "CREDIT_DEBIT_CARD",
279
+ "EMAIL",
280
+ "FIRST_NAME",
281
+ "LAST_NAME",
282
+ "PASSPORT_NUMBER",
283
+ "PHONE_NUMBER",
284
+ "POSTCODE",
285
+ "PPSN",
286
+ "SWIFT_BIC"
287
+ ],
288
+ "span_label_thresholds": {
289
+ "ACCOUNT_NUMBER": 0.5,
290
+ "BANK_ROUTING_NUMBER": 0.5,
291
+ "CREDIT_DEBIT_CARD": 0.5,
292
+ "EMAIL": 0.5,
293
+ "FIRST_NAME": 0.5,
294
+ "LAST_NAME": 0.5,
295
+ "PASSPORT_NUMBER": 0.5,
296
+ "PHONE_NUMBER": 0.5,
297
+ "POSTCODE": 0.5,
298
+ "PPSN": 0.5,
299
+ "SWIFT_BIC": 0.5
300
+ },
301
+ "span_positive_weight": 6.0,
302
+ "tie_weights_": true,
303
+ "token_extend_thresholds": {
304
+ "ACCOUNT_NUMBER": 0.08,
305
+ "BANK_ROUTING_NUMBER": 0.3,
306
+ "CREDIT_DEBIT_CARD": 0.3,
307
+ "EMAIL": 0.3,
308
+ "FIRST_NAME": 0.3,
309
+ "LAST_NAME": 0.3,
310
+ "PASSPORT_NUMBER": 0.3,
311
+ "PHONE_NUMBER": 0.15,
312
+ "POSTCODE": 0.3,
313
+ "PPSN": 0.3,
314
+ "SWIFT_BIC": 0.3
315
+ },
316
+ "token_label_thresholds": {
317
+ "ACCOUNT_NUMBER": 0.18,
318
+ "BANK_ROUTING_NUMBER": 0.8,
319
+ "CREDIT_DEBIT_CARD": 0.8,
320
+ "EMAIL": 0.95,
321
+ "FIRST_NAME": 0.3,
322
+ "LAST_NAME": 0.4,
323
+ "PASSPORT_NUMBER": 0.8,
324
+ "PHONE_NUMBER": 0.65,
325
+ "POSTCODE": 0.9,
326
+ "PPSN": 0.7,
327
+ "SWIFT_BIC": 0.8
328
+ },
329
+ "token_positive_weight": 4.0,
330
+ "token_presence_weight": 1.0,
331
+ "transformers_version": "4.57.6",
332
+ "vocab_size": 119547
333
+ }
eval/benchmark_summary.json ADDED
@@ -0,0 +1,66 @@
1
+ {
2
+ "release": "IrishCore-DiffMask-135M-v1-rc1",
3
+ "repo_id": "temsa/IrishCore-DiffMask-135M-v1-rc1",
4
+ "architecture": {
5
+ "family": "DistilBERT-size token-span extractor",
6
+ "diffusion_style_training": true,
7
+ "runtime_diffusion": false,
8
+ "scanner_free": true,
9
+ "validator_free": true,
10
+ "heads": [
11
+ "token_presence_head",
12
+ "typed_start_boundary_head",
13
+ "typed_end_boundary_head"
14
+ ]
15
+ },
16
+ "base_model": "OpenMed/OpenMed-PII-mLiteClinical-Base-135M-v1",
17
+ "notes": [
18
+ "DiffMask uses a masked denoising training schedule, not a generative diffusion runtime.",
19
+ "ONNX q8 is the recommended CPU deployment artifact.",
20
+ "The release inference scripts emit [PII:LABEL] placeholders."
21
+ ],
22
+ "full": {
23
+ "core_f1": 0.9801324503311258,
24
+ "edge_f1": 1.0,
25
+ "multilingual_f1": 0.9411764705882352,
26
+ "hardening_f1": 0.9743589743589743
27
+ },
28
+ "onnx_q8": {
29
+ "core_f1": 0.9933774834437086,
30
+ "edge_f1": 1.0,
31
+ "finance_f1": 1.0,
32
+ "finance_boundary_f1": 1.0,
33
+ "user_ppsn_f1": 1.0,
34
+ "gaelic_weak_ppsn_f1": 1.0,
35
+ "multilingual_f1": 0.9411764705882352,
36
+ "hardening_f1": 0.9743589743589743,
37
+ "core_examples_per_second": 251.2357762469535,
38
+ "multilingual_examples_per_second": 256.0767781803832,
39
+ "runtime_profile_examples_per_second": 184.2930265536054
40
+ },
41
+ "comparison": {
42
+ "public_rc5_onnx_q8": {
43
+ "core": 0.9668874172185431,
44
+ "edge": 0.9743589743589743,
45
+ "remaining_gaps": 0.888888888888889,
46
+ "finance": 0.9361702127659575,
47
+ "finance_boundary": 0.8750000000000001,
48
+ "multilingual_ppsn": 0.9333333333333333,
49
+ "user_ppsn": 1.0,
50
+ "gaelic_weak_ppsn": 1.0,
51
+ "overlap_ppsn": 1.0
52
+ },
53
+ "public_rc8_onnx_q8": {
54
+ "min_score": 0.5,
55
+ "irish_core_manual_f1": 0.9736842105263158,
56
+ "irish_edge_f1": 1.0,
57
+ "finance_suite_f1": 1.0,
58
+ "finance_boundary_f1": 1.0,
59
+ "user_raw_ppsn_f1": 1.0,
60
+ "gaelic_weak_ppsn_f1": 1.0,
61
+ "multilingual_ppsn_f1": 0.9176470588235294,
62
+ "core_examples_per_second": 46.14201741375802,
63
+ "multilingual_examples_per_second": 99.71655616732895
64
+ }
65
+ }
66
+ }
eval/benchmark_summary.md ADDED
@@ -0,0 +1,23 @@
+ # Benchmark Summary
+
+ ## ONNX q8
+
+ | Suite | F1 | Examples/s |
+ |---|---:|---:|
+ | Irish core | 0.9934 | 251.2358 |
+ | Edge | 1.0000 | 265.4837 |
+ | Finance | 1.0000 | 155.8590 |
+ | Finance boundary | 1.0000 | 238.5489 |
+ | User PPSN | 1.0000 | 200.2353 |
+ | GA weak PPSN | 1.0000 | 133.5861 |
+ | Multilingual PPSN | 0.9412 | 256.0768 |
+ | Hardening exact | 0.9744 | 249.1251 |
+
+ ## Full checkpoint
+
+ | Suite | F1 | Examples/s |
+ |---|---:|---:|
+ | Irish core | 0.9801 | 89.6860 |
+ | Edge | 1.0000 | 115.5775 |
+ | Multilingual PPSN | 0.9412 | 115.2831 |
+ | Hardening exact | 0.9744 | 98.6398 |
eval/full_core_irish_core_pii_v1.json ADDED
@@ -0,0 +1,108 @@
1
+ {
2
+ "model": "release/IrishCore-DiffMask-135M-v1-rc1",
3
+ "input": "eval/irish_core_pii_v1.jsonl",
4
+ "loader_type": "token_span_pt",
5
+ "examples": 37,
6
+ "min_score": 0.5,
7
+ "iou_threshold": 0.5,
8
+ "elapsed_seconds": 0.41255046310834587,
9
+ "examples_per_second": 89.68599797761685,
10
+ "overall": {
11
+ "precision": 0.9866666666666667,
12
+ "recall": 0.9736842105263158,
13
+ "f1": 0.9801324503311258,
14
+ "tp": 74,
15
+ "fp": 1,
16
+ "fn": 2
17
+ },
18
+ "by_label": {
19
+ "ACCOUNT_NUMBER": {
20
+ "precision": 1.0,
21
+ "recall": 1.0,
22
+ "f1": 1.0,
23
+ "tp": 3,
24
+ "fp": 0,
25
+ "fn": 0
26
+ },
27
+ "BANK_ROUTING_NUMBER": {
28
+ "precision": 1.0,
29
+ "recall": 1.0,
30
+ "f1": 1.0,
31
+ "tp": 1,
32
+ "fp": 0,
33
+ "fn": 0
34
+ },
35
+ "CREDIT_DEBIT_CARD": {
36
+ "precision": 1.0,
37
+ "recall": 1.0,
38
+ "f1": 1.0,
39
+ "tp": 2,
40
+ "fp": 0,
41
+ "fn": 0
42
+ },
43
+ "EMAIL": {
44
+ "precision": 1.0,
45
+ "recall": 1.0,
46
+ "f1": 1.0,
47
+ "tp": 6,
48
+ "fp": 0,
49
+ "fn": 0
50
+ },
51
+ "FIRST_NAME": {
52
+ "precision": 0.9444444444444444,
53
+ "recall": 0.8947368421052632,
54
+ "f1": 0.918918918918919,
55
+ "tp": 17,
56
+ "fp": 1,
57
+ "fn": 2
58
+ },
59
+ "LAST_NAME": {
60
+ "precision": 1.0,
61
+ "recall": 1.0,
62
+ "f1": 1.0,
63
+ "tp": 19,
64
+ "fp": 0,
65
+ "fn": 0
66
+ },
67
+ "PASSPORT_NUMBER": {
68
+ "precision": 1.0,
69
+ "recall": 1.0,
70
+ "f1": 1.0,
71
+ "tp": 2,
72
+ "fp": 0,
73
+ "fn": 0
74
+ },
75
+ "PHONE_NUMBER": {
76
+ "precision": 1.0,
77
+ "recall": 1.0,
78
+ "f1": 1.0,
79
+ "tp": 12,
80
+ "fp": 0,
81
+ "fn": 0
82
+ },
83
+ "POSTCODE": {
84
+ "precision": 1.0,
85
+ "recall": 1.0,
86
+ "f1": 1.0,
87
+ "tp": 4,
88
+ "fp": 0,
89
+ "fn": 0
90
+ },
91
+ "PPSN": {
92
+ "precision": 1.0,
93
+ "recall": 1.0,
94
+ "f1": 1.0,
95
+ "tp": 6,
96
+ "fp": 0,
97
+ "fn": 0
98
+ },
99
+ "SWIFT_BIC": {
100
+ "precision": 1.0,
101
+ "recall": 1.0,
102
+ "f1": 1.0,
103
+ "tp": 2,
104
+ "fp": 0,
105
+ "fn": 0
106
+ }
107
+ }
108
+ }
eval/full_edge_irish_ppsn_phone_edge_v1.json ADDED
@@ -0,0 +1,36 @@
1
+ {
2
+ "model": "release/IrishCore-DiffMask-135M-v1-rc1",
3
+ "input": "eval/irish_ppsn_phone_edge_v1.jsonl",
4
+ "loader_type": "token_span_pt",
5
+ "examples": 22,
6
+ "min_score": 0.5,
7
+ "iou_threshold": 0.5,
8
+ "elapsed_seconds": 0.19034840108361095,
9
+ "examples_per_second": 115.57754031428114,
10
+ "overall": {
11
+ "precision": 1.0,
12
+ "recall": 1.0,
13
+ "f1": 1.0,
14
+ "tp": 19,
15
+ "fp": 0,
16
+ "fn": 0
17
+ },
18
+ "by_label": {
19
+ "PHONE_NUMBER": {
20
+ "precision": 1.0,
21
+ "recall": 1.0,
22
+ "f1": 1.0,
23
+ "tp": 13,
24
+ "fp": 0,
25
+ "fn": 0
26
+ },
27
+ "PPSN": {
28
+ "precision": 1.0,
29
+ "recall": 1.0,
30
+ "f1": 1.0,
31
+ "tp": 6,
32
+ "fp": 0,
33
+ "fn": 0
34
+ }
35
+ }
36
+ }
eval/full_hardening_exact_v1.json ADDED
@@ -0,0 +1,100 @@
1
+ {
2
+ "model": "release/IrishCore-DiffMask-135M-v1-rc1",
3
+ "input": "eval/irish_dllm_hardening_exact_v1.jsonl",
4
+ "loader_type": "token_span_pt",
5
+ "examples": 14,
6
+ "min_score": 0.5,
7
+ "iou_threshold": 0.5,
8
+ "elapsed_seconds": 0.14193059597164392,
9
+ "examples_per_second": 98.63976054040552,
10
+ "overall": {
11
+ "precision": 1.0,
12
+ "recall": 0.95,
13
+ "f1": 0.9743589743589743,
14
+ "tp": 19,
15
+ "fp": 0,
16
+ "fn": 1
17
+ },
18
+ "by_label": {
19
+ "ACCOUNT_NUMBER": {
20
+ "precision": 1.0,
21
+ "recall": 1.0,
22
+ "f1": 1.0,
23
+ "tp": 1,
24
+ "fp": 0,
25
+ "fn": 0
26
+ },
27
+ "BANK_ROUTING_NUMBER": {
28
+ "precision": 1.0,
29
+ "recall": 1.0,
30
+ "f1": 1.0,
31
+ "tp": 2,
32
+ "fp": 0,
33
+ "fn": 0
34
+ },
35
+ "CREDIT_DEBIT_CARD": {
36
+ "precision": 1.0,
37
+ "recall": 1.0,
38
+ "f1": 1.0,
39
+ "tp": 2,
40
+ "fp": 0,
41
+ "fn": 0
42
+ },
43
+ "EMAIL": {
44
+ "precision": 1.0,
45
+ "recall": 1.0,
46
+ "f1": 1.0,
47
+ "tp": 2,
48
+ "fp": 0,
49
+ "fn": 0
50
+ },
51
+ "FIRST_NAME": {
52
+ "precision": 1.0,
53
+ "recall": 1.0,
54
+ "f1": 1.0,
55
+ "tp": 1,
56
+ "fp": 0,
57
+ "fn": 0
58
+ },
59
+ "LAST_NAME": {
60
+ "precision": 1.0,
61
+ "recall": 1.0,
62
+ "f1": 1.0,
63
+ "tp": 1,
64
+ "fp": 0,
65
+ "fn": 0
66
+ },
67
+ "PHONE_NUMBER": {
68
+ "precision": 1.0,
69
+ "recall": 1.0,
70
+ "f1": 1.0,
71
+ "tp": 2,
72
+ "fp": 0,
73
+ "fn": 0
74
+ },
75
+ "POSTCODE": {
76
+ "precision": 1.0,
77
+ "recall": 0.6666666666666666,
78
+ "f1": 0.8,
79
+ "tp": 2,
80
+ "fp": 0,
81
+ "fn": 1
82
+ },
83
+ "PPSN": {
84
+ "precision": 1.0,
85
+ "recall": 1.0,
86
+ "f1": 1.0,
87
+ "tp": 3,
88
+ "fp": 0,
89
+ "fn": 0
90
+ },
91
+ "SWIFT_BIC": {
92
+ "precision": 1.0,
93
+ "recall": 1.0,
94
+ "f1": 1.0,
95
+ "tp": 3,
96
+ "fp": 0,
97
+ "fn": 0
98
+ }
99
+ }
100
+ }
eval/full_multilingual_ppsn_v1_all.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "model": "release/IrishCore-DiffMask-135M-v1-rc1",
3
+ "input": "eval/multilingual_ppsn_v1_all.jsonl",
4
+ "loader_type": "token_span_pt",
5
+ "examples": 168,
6
+ "min_score": 0.5,
7
+ "iou_threshold": 0.5,
8
+ "elapsed_seconds": 1.4572819739114493,
9
+ "examples_per_second": 115.283111304174,
10
+ "overall": {
11
+ "precision": 0.9302325581395349,
12
+ "recall": 0.9523809523809523,
13
+ "f1": 0.9411764705882352,
14
+ "tp": 80,
15
+ "fp": 6,
16
+ "fn": 4
17
+ },
18
+ "by_label": {
19
+ "PPSN": {
20
+ "precision": 0.975609756097561,
21
+ "recall": 0.9523809523809523,
22
+ "f1": 0.963855421686747,
23
+ "tp": 80,
24
+ "fp": 2,
25
+ "fn": 4
26
+ }
27
+ }
28
+ }
eval/q8_core_irish_core_pii_v1.json ADDED
@@ -0,0 +1,108 @@
1
+ {
2
+ "model": "release/IrishCore-DiffMask-135M-v1-rc1",
3
+ "input": "eval/irish_core_pii_v1.jsonl",
4
+ "loader_type": "token_span_onnx_q8",
5
+ "examples": 37,
6
+ "min_score": 0.5,
7
+ "iou_threshold": 0.5,
8
+ "elapsed_seconds": 0.1472720189485699,
9
+ "examples_per_second": 251.2357762469535,
10
+ "overall": {
11
+ "precision": 1.0,
12
+ "recall": 0.9868421052631579,
13
+ "f1": 0.9933774834437086,
14
+ "tp": 75,
15
+ "fp": 0,
16
+ "fn": 1
17
+ },
18
+ "by_label": {
19
+ "ACCOUNT_NUMBER": {
20
+ "precision": 1.0,
21
+ "recall": 1.0,
22
+ "f1": 1.0,
23
+ "tp": 3,
24
+ "fp": 0,
25
+ "fn": 0
26
+ },
27
+ "BANK_ROUTING_NUMBER": {
28
+ "precision": 1.0,
29
+ "recall": 1.0,
30
+ "f1": 1.0,
31
+ "tp": 1,
32
+ "fp": 0,
33
+ "fn": 0
34
+ },
35
+ "CREDIT_DEBIT_CARD": {
36
+ "precision": 1.0,
37
+ "recall": 1.0,
38
+ "f1": 1.0,
39
+ "tp": 2,
40
+ "fp": 0,
41
+ "fn": 0
42
+ },
43
+ "EMAIL": {
44
+ "precision": 1.0,
45
+ "recall": 1.0,
46
+ "f1": 1.0,
47
+ "tp": 6,
48
+ "fp": 0,
49
+ "fn": 0
50
+ },
51
+ "FIRST_NAME": {
52
+ "precision": 1.0,
53
+ "recall": 0.9473684210526315,
54
+ "f1": 0.972972972972973,
55
+ "tp": 18,
56
+ "fp": 0,
57
+ "fn": 1
58
+ },
59
+ "LAST_NAME": {
60
+ "precision": 1.0,
61
+ "recall": 1.0,
62
+ "f1": 1.0,
63
+ "tp": 19,
64
+ "fp": 0,
65
+ "fn": 0
66
+ },
67
+ "PASSPORT_NUMBER": {
68
+ "precision": 1.0,
69
+ "recall": 1.0,
70
+ "f1": 1.0,
71
+ "tp": 2,
72
+ "fp": 0,
73
+ "fn": 0
74
+ },
75
+ "PHONE_NUMBER": {
76
+ "precision": 1.0,
77
+ "recall": 1.0,
78
+ "f1": 1.0,
79
+ "tp": 12,
80
+ "fp": 0,
81
+ "fn": 0
82
+ },
83
+ "POSTCODE": {
84
+ "precision": 1.0,
85
+ "recall": 1.0,
86
+ "f1": 1.0,
87
+ "tp": 4,
88
+ "fp": 0,
89
+ "fn": 0
90
+ },
91
+ "PPSN": {
92
+ "precision": 1.0,
93
+ "recall": 1.0,
94
+ "f1": 1.0,
95
+ "tp": 6,
96
+ "fp": 0,
97
+ "fn": 0
98
+ },
99
+ "SWIFT_BIC": {
100
+ "precision": 1.0,
101
+ "recall": 1.0,
102
+ "f1": 1.0,
103
+ "tp": 2,
104
+ "fp": 0,
105
+ "fn": 0
106
+ }
107
+ }
108
+ }
eval/q8_edge_irish_ppsn_phone_edge_v1.json ADDED
@@ -0,0 +1,36 @@
1
+ {
2
+ "model": "release/IrishCore-DiffMask-135M-v1-rc1",
3
+ "input": "eval/irish_ppsn_phone_edge_v1.jsonl",
4
+ "loader_type": "token_span_onnx_q8",
5
+ "examples": 22,
6
+ "min_score": 0.5,
7
+ "iou_threshold": 0.5,
8
+ "elapsed_seconds": 0.08286760398186743,
9
+ "examples_per_second": 265.48372274422104,
10
+ "overall": {
11
+ "precision": 1.0,
12
+ "recall": 1.0,
13
+ "f1": 1.0,
14
+ "tp": 19,
15
+ "fp": 0,
16
+ "fn": 0
17
+ },
18
+ "by_label": {
19
+ "PHONE_NUMBER": {
20
+ "precision": 1.0,
21
+ "recall": 1.0,
22
+ "f1": 1.0,
23
+ "tp": 13,
24
+ "fp": 0,
25
+ "fn": 0
26
+ },
27
+ "PPSN": {
28
+ "precision": 1.0,
29
+ "recall": 1.0,
30
+ "f1": 1.0,
31
+ "tp": 6,
32
+ "fp": 0,
33
+ "fn": 0
34
+ }
35
+ }
36
+ }
eval/q8_finance_boundary_repair_v1.json ADDED
@@ -0,0 +1,76 @@
1
+ {
2
+ "model": "release/IrishCore-DiffMask-135M-v1-rc1",
3
+ "input": "eval/irish_finance_boundary_repair_v1.jsonl",
4
+ "loader_type": "token_span_onnx_q8",
5
+ "examples": 12,
6
+ "min_score": 0.5,
7
+ "iou_threshold": 0.5,
8
+ "elapsed_seconds": 0.05030416103545576,
9
+ "examples_per_second": 238.54885466715305,
10
+ "overall": {
11
+ "precision": 1.0,
12
+ "recall": 1.0,
13
+ "f1": 1.0,
14
+ "tp": 18,
15
+ "fp": 0,
16
+ "fn": 0
17
+ },
18
+ "by_label": {
19
+ "ACCOUNT_NUMBER": {
20
+ "precision": 1.0,
21
+ "recall": 1.0,
22
+ "f1": 1.0,
23
+ "tp": 2,
24
+ "fp": 0,
25
+ "fn": 0
26
+ },
27
+ "BANK_ROUTING_NUMBER": {
28
+ "precision": 1.0,
29
+ "recall": 1.0,
30
+ "f1": 1.0,
31
+ "tp": 2,
32
+ "fp": 0,
33
+ "fn": 0
34
+ },
35
+ "CREDIT_DEBIT_CARD": {
36
+ "precision": 1.0,
37
+ "recall": 1.0,
38
+ "f1": 1.0,
39
+ "tp": 2,
40
+ "fp": 0,
41
+ "fn": 0
42
+ },
43
+ "PASSPORT_NUMBER": {
44
+ "precision": 1.0,
45
+ "recall": 1.0,
46
+ "f1": 1.0,
47
+ "tp": 4,
48
+ "fp": 0,
49
+ "fn": 0
50
+ },
51
+ "PHONE_NUMBER": {
52
+ "precision": 1.0,
53
+ "recall": 1.0,
54
+ "f1": 1.0,
55
+ "tp": 4,
56
+ "fp": 0,
57
+ "fn": 0
58
+ },
59
+ "PPSN": {
60
+ "precision": 1.0,
61
+ "recall": 1.0,
62
+ "f1": 1.0,
63
+ "tp": 2,
64
+ "fp": 0,
65
+ "fn": 0
66
+ },
67
+ "SWIFT_BIC": {
68
+ "precision": 1.0,
69
+ "recall": 1.0,
70
+ "f1": 1.0,
71
+ "tp": 2,
72
+ "fp": 0,
73
+ "fn": 0
74
+ }
75
+ }
76
+ }
eval/q8_finance_irish_phone_passport_finance_v1.json ADDED
@@ -0,0 +1,76 @@
1
+ {
2
+ "model": "release/IrishCore-DiffMask-135M-v1-rc1",
3
+ "input": "eval/irish_phone_passport_finance_v1.jsonl",
4
+ "loader_type": "token_span_onnx_q8",
5
+ "examples": 20,
6
+ "min_score": 0.5,
7
+ "iou_threshold": 0.5,
8
+ "elapsed_seconds": 0.12832110095769167,
9
+ "examples_per_second": 155.8590118907578,
10
+ "overall": {
11
+ "precision": 1.0,
12
+ "recall": 1.0,
13
+ "f1": 1.0,
14
+ "tp": 25,
15
+ "fp": 0,
16
+ "fn": 0
17
+ },
18
+ "by_label": {
19
+ "ACCOUNT_NUMBER": {
20
+ "precision": 1.0,
21
+ "recall": 1.0,
22
+ "f1": 1.0,
23
+ "tp": 2,
24
+ "fp": 0,
25
+ "fn": 0
26
+ },
27
+ "BANK_ROUTING_NUMBER": {
28
+ "precision": 1.0,
29
+ "recall": 1.0,
30
+ "f1": 1.0,
31
+ "tp": 5,
32
+ "fp": 0,
33
+ "fn": 0
34
+ },
35
+ "CREDIT_DEBIT_CARD": {
36
+ "precision": 1.0,
37
+ "recall": 1.0,
38
+ "f1": 1.0,
39
+ "tp": 2,
40
+ "fp": 0,
41
+ "fn": 0
42
+ },
43
+ "PASSPORT_NUMBER": {
44
+ "precision": 1.0,
45
+ "recall": 1.0,
46
+ "f1": 1.0,
47
+ "tp": 6,
48
+ "fp": 0,
49
+ "fn": 0
50
+ },
51
+ "PHONE_NUMBER": {
52
+ "precision": 1.0,
53
+ "recall": 1.0,
54
+ "f1": 1.0,
55
+ "tp": 6,
56
+ "fp": 0,
57
+ "fn": 0
58
+ },
59
+ "PPSN": {
60
+ "precision": 1.0,
61
+ "recall": 1.0,
62
+ "f1": 1.0,
63
+ "tp": 2,
64
+ "fp": 0,
65
+ "fn": 0
66
+ },
67
+ "SWIFT_BIC": {
68
+ "precision": 1.0,
69
+ "recall": 1.0,
70
+ "f1": 1.0,
71
+ "tp": 2,
72
+ "fp": 0,
73
+ "fn": 0
74
+ }
75
+ }
76
+ }
eval/q8_gaelic_weak_ppsn_v1.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "model": "release/IrishCore-DiffMask-135M-v1-rc1",
3
+ "input": "eval/qa_feedback_ga_ppsn_weakctx_v1.jsonl",
4
+ "loader_type": "token_span_onnx_q8",
5
+ "examples": 2,
6
+ "min_score": 0.5,
7
+ "iou_threshold": 0.5,
8
+ "elapsed_seconds": 0.01497162296436727,
9
+ "examples_per_second": 133.58605174335713,
10
+ "overall": {
11
+ "precision": 1.0,
12
+ "recall": 1.0,
13
+ "f1": 1.0,
14
+ "tp": 2,
15
+ "fp": 0,
16
+ "fn": 0
17
+ },
18
+ "by_label": {
19
+ "PPSN": {
20
+ "precision": 1.0,
21
+ "recall": 1.0,
22
+ "f1": 1.0,
23
+ "tp": 2,
24
+ "fp": 0,
25
+ "fn": 0
26
+ }
27
+ }
28
+ }
eval/q8_hardening_exact_v1.json ADDED
@@ -0,0 +1,100 @@
1
+ {
2
+ "model": "release/IrishCore-DiffMask-135M-v1-rc1",
3
+ "input": "eval/irish_dllm_hardening_exact_v1.jsonl",
4
+ "loader_type": "token_span_onnx_q8",
5
+ "examples": 14,
6
+ "min_score": 0.5,
7
+ "iou_threshold": 0.5,
8
+ "elapsed_seconds": 0.056196670047938824,
9
+ "examples_per_second": 249.12508139819025,
10
+ "overall": {
11
+ "precision": 1.0,
12
+ "recall": 0.95,
13
+ "f1": 0.9743589743589743,
14
+ "tp": 19,
15
+ "fp": 0,
16
+ "fn": 1
17
+ },
18
+ "by_label": {
19
+ "ACCOUNT_NUMBER": {
20
+ "precision": 1.0,
21
+ "recall": 1.0,
22
+ "f1": 1.0,
23
+ "tp": 1,
24
+ "fp": 0,
25
+ "fn": 0
26
+ },
27
+ "BANK_ROUTING_NUMBER": {
28
+ "precision": 1.0,
29
+ "recall": 1.0,
30
+ "f1": 1.0,
31
+ "tp": 2,
32
+ "fp": 0,
33
+ "fn": 0
34
+ },
35
+ "CREDIT_DEBIT_CARD": {
36
+ "precision": 1.0,
37
+ "recall": 1.0,
38
+ "f1": 1.0,
39
+ "tp": 2,
40
+ "fp": 0,
41
+ "fn": 0
42
+ },
43
+ "EMAIL": {
44
+ "precision": 1.0,
45
+ "recall": 1.0,
46
+ "f1": 1.0,
47
+ "tp": 2,
48
+ "fp": 0,
49
+ "fn": 0
50
+ },
51
+ "FIRST_NAME": {
52
+ "precision": 1.0,
53
+ "recall": 1.0,
54
+ "f1": 1.0,
55
+ "tp": 1,
56
+ "fp": 0,
57
+ "fn": 0
58
+ },
59
+ "LAST_NAME": {
60
+ "precision": 1.0,
61
+ "recall": 1.0,
62
+ "f1": 1.0,
63
+ "tp": 1,
64
+ "fp": 0,
65
+ "fn": 0
66
+ },
67
+ "PHONE_NUMBER": {
68
+ "precision": 1.0,
69
+ "recall": 1.0,
70
+ "f1": 1.0,
71
+ "tp": 2,
72
+ "fp": 0,
73
+ "fn": 0
74
+ },
75
+ "POSTCODE": {
76
+ "precision": 1.0,
77
+ "recall": 0.6666666666666666,
78
+ "f1": 0.8,
79
+ "tp": 2,
80
+ "fp": 0,
81
+ "fn": 1
82
+ },
83
+ "PPSN": {
84
+ "precision": 1.0,
85
+ "recall": 1.0,
86
+ "f1": 1.0,
87
+ "tp": 3,
88
+ "fp": 0,
89
+ "fn": 0
90
+ },
91
+ "SWIFT_BIC": {
92
+ "precision": 1.0,
93
+ "recall": 1.0,
94
+ "f1": 1.0,
95
+ "tp": 3,
96
+ "fp": 0,
97
+ "fn": 0
98
+ }
99
+ }
100
+ }
eval/q8_multilingual_ppsn_v1_all.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "model": "release/IrishCore-DiffMask-135M-v1-rc1",
3
+ "input": "eval/multilingual_ppsn_v1_all.jsonl",
4
+ "loader_type": "token_span_onnx_q8",
5
+ "examples": 168,
6
+ "min_score": 0.5,
7
+ "iou_threshold": 0.5,
8
+ "elapsed_seconds": 0.6560532399453223,
9
+ "examples_per_second": 256.0767781803832,
10
+ "overall": {
11
+ "precision": 0.9302325581395349,
12
+ "recall": 0.9523809523809523,
13
+ "f1": 0.9411764705882352,
14
+ "tp": 80,
15
+ "fp": 6,
16
+ "fn": 4
17
+ },
18
+ "by_label": {
19
+ "PPSN": {
20
+ "precision": 0.975609756097561,
21
+ "recall": 0.9523809523809523,
22
+ "f1": 0.963855421686747,
23
+ "tp": 80,
24
+ "fp": 2,
25
+ "fn": 4
26
+ }
27
+ }
28
+ }
eval/q8_runtime_profile_source.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "model": "release/IrishCore-DiffMask-135M-v1-rc1",
3
+ "input": "eval/runtime_profile_source.jsonl",
4
+ "loader_type": "token_span_onnx_q8",
5
+ "examples": 3223,
6
+ "min_score": 0.5,
7
+ "iou_threshold": 0.5,
8
+ "elapsed_seconds": 17.488453362951986,
9
+ "examples_per_second": 184.2930265536054,
10
+ "overall": {
11
+ "precision": 0.6116352201257862,
12
+ "recall": 0.219340287566958,
13
+ "f1": 0.32288856609255034,
14
+ "tp": 778,
15
+ "fp": 494,
16
+ "fn": 2769
17
+ },
18
+ "by_label": {
19
+ "PPSN": {
20
+ "precision": 0.7524177949709865,
21
+ "recall": 0.219340287566958,
22
+ "f1": 0.3396638288583279,
23
+ "tp": 778,
24
+ "fp": 256,
25
+ "fn": 2769
26
+ }
27
+ }
28
+ }
eval/q8_user_raw_regression_cases_v1.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "model": "release/IrishCore-DiffMask-135M-v1-rc1",
3
+ "input": "eval/user_raw_regression_cases_v1.jsonl",
4
+ "loader_type": "token_span_onnx_q8",
5
+ "examples": 7,
6
+ "min_score": 0.5,
7
+ "iou_threshold": 0.5,
8
+ "elapsed_seconds": 0.03495886700693518,
9
+ "examples_per_second": 200.23532223202005,
10
+ "overall": {
11
+ "precision": 1.0,
12
+ "recall": 1.0,
13
+ "f1": 1.0,
14
+ "tp": 3,
15
+ "fp": 0,
16
+ "fn": 0
17
+ },
18
+ "by_label": {
19
+ "PPSN": {
20
+ "precision": 1.0,
21
+ "recall": 1.0,
22
+ "f1": 1.0,
23
+ "tp": 3,
24
+ "fp": 0,
25
+ "fn": 0
26
+ }
27
+ }
28
+ }
inference_mask.py ADDED
@@ -0,0 +1,111 @@
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import argparse
5
+ import json
6
+ import os
7
+
8
+ os.environ.setdefault("TRANSFORMERS_NO_TF", "1")
9
+ os.environ.setdefault("TRANSFORMERS_NO_FLAX", "1")
10
+ os.environ.setdefault("TRANSFORMERS_NO_TORCHVISION", "1")
11
+ os.environ["USE_TF"] = "0"
12
+ os.environ["USE_FLAX"] = "0"
13
+ os.environ["USE_TORCH"] = "1"
14
+
15
+ import torch
16
+ from transformers import AutoConfig
17
+
18
+ from common import (
19
+ boundary_label_thresholds_from_config,
20
+ decode_token_presence_segments,
21
+ label_max_span_tokens_from_config,
22
+ label_min_nonspace_chars_from_config,
23
+ label_names_from_config,
24
+ safe_auto_tokenizer,
25
+ token_extend_thresholds_from_config,
26
+ token_label_thresholds_from_config,
27
+ )
28
+ from multitask_model import IrishCoreTokenSpanModel
29
+
30
+
31
+ def replacement(label: str) -> str:
32
+ return f"[PII:{label}]"
33
+
34
+
35
+ def mask_text(text: str, spans: list[dict]) -> str:
36
+ out = text
37
+ for span in sorted(spans, key=lambda item: (item["start"], item["end"]), reverse=True):
38
+ out = out[: span["start"]] + replacement(span["label"]) + out[span["end"] :]
39
+ return out
40
+
41
+
42
+ def predict(text: str, model, tokenizer, min_score: float):
43
+ encoded = tokenizer(text, return_offsets_mapping=True, return_tensors="pt", truncation=True)
44
+ offsets = [tuple(item) for item in encoded.pop("offset_mapping")[0].tolist()]
45
+ device = next(model.parameters()).device
46
+ encoded = {key: value.to(device) for key, value in encoded.items()}
47
+ with torch.no_grad():
48
+ output = model(**encoded)
49
+ token_scores = torch.sigmoid(output.token_logits[0]).cpu().numpy()
50
+ start_scores = torch.sigmoid(output.start_logits[0]).cpu().numpy()
51
+ end_scores = torch.sigmoid(output.end_logits[0]).cpu().numpy()
52
+ label_names = label_names_from_config(model.config)
53
+ thresholds = token_label_thresholds_from_config(model.config, min_score)
54
+ extend_thresholds = token_extend_thresholds_from_config(model.config)
55
+ max_span_tokens = label_max_span_tokens_from_config(model.config)
56
+ min_nonspace_chars = label_min_nonspace_chars_from_config(model.config)
57
+ boundary_thresholds = boundary_label_thresholds_from_config(model.config)
58
+ spans = decode_token_presence_segments(
59
+ text,
60
+ offsets,
61
+ token_scores,
62
+ label_names,
63
+ min_score,
64
+ thresholds,
65
+ extend_thresholds,
66
+ max_span_tokens,
67
+ min_nonspace_chars,
68
+ boundary_thresholds,
69
+ start_scores=start_scores,
70
+ end_scores=end_scores,
71
+ )
72
+ for span in spans:
73
+ span["replacement"] = replacement(span["label"])
74
+ return spans
75
+
76
+
77
+ def main() -> None:
78
+ parser = argparse.ArgumentParser()
79
+ parser.add_argument("--model", required=True)
80
+ parser.add_argument("--text", required=True)
81
+ parser.add_argument("--min-score", type=float, default=0.5)
82
+ parser.add_argument("--device", choices=["auto", "cpu", "cuda"], default="auto")
83
+ parser.add_argument("--json", action="store_true")
84
+ args = parser.parse_args()
85
+
86
+ tokenizer = safe_auto_tokenizer(args.model)
87
+ config = AutoConfig.from_pretrained(args.model)
88
+ model = IrishCoreTokenSpanModel.from_pretrained(args.model, config=config)
89
+ if args.device == "auto":
90
+ device = "cuda" if torch.cuda.is_available() else "cpu"
91
+ else:
92
+ device = args.device
93
+ model.to(device)
94
+ model.eval()
95
+
96
+ spans = predict(args.text, model, tokenizer, args.min_score)
97
+ result = {
98
+ "model": args.model,
99
+ "backend": "transformers_token_span",
100
+ "min_score": args.min_score,
101
+ "spans": spans,
102
+ "masked_text": mask_text(args.text, spans),
103
+ }
104
+ if args.json:
105
+ print(json.dumps(result, indent=2, ensure_ascii=False))
106
+ else:
107
+ print(result["masked_text"])
108
+
109
+
110
+ if __name__ == "__main__":
111
+ main()
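A usage sketch for this script, assuming the release has been downloaded so that the checkpoint, tokenizer files, config.json, and the bundled common.py sit in the working directory (the "." path is illustrative); the example text is the one given in qa_config.json:

    python inference_mask.py \
      --model . \
      --text "My PPSN is 1234567TW, my Eircode is D02 X285, and my phone is 087 123 4567." \
      --json

--min-score defaults to 0.5, which matches the evaluation settings recorded under eval/, and --device auto selects CUDA when it is available, otherwise CPU.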
inference_mask_onnx.py ADDED
@@ -0,0 +1,97 @@
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import argparse
5
+ import json
6
+ import os
7
+
8
+ os.environ.setdefault("TRANSFORMERS_NO_TF", "1")
9
+ os.environ.setdefault("TRANSFORMERS_NO_FLAX", "1")
10
+ os.environ.setdefault("TRANSFORMERS_NO_TORCHVISION", "1")
11
+ os.environ["USE_TF"] = "0"
12
+ os.environ["USE_FLAX"] = "0"
13
+ os.environ["USE_TORCH"] = "1"
14
+
15
+ import numpy as np
16
+
17
+ from common import (
18
+ boundary_label_thresholds_from_config,
19
+ decode_token_presence_segments,
20
+ label_max_span_tokens_from_config,
21
+ label_min_nonspace_chars_from_config,
22
+ label_names_from_config,
23
+ load_onnx_session,
24
+ run_onnx_all,
25
+ token_extend_thresholds_from_config,
26
+ token_label_thresholds_from_config,
27
+ )
28
+
29
+
30
+ def replacement(label: str) -> str:
31
+ return f"[PII:{label}]"
32
+
33
+
34
+ def mask_text(text: str, spans: list[dict]) -> str:
35
+ out = text
36
+ for span in sorted(spans, key=lambda item: (item["start"], item["end"]), reverse=True):
37
+ out = out[: span["start"]] + replacement(span["label"]) + out[span["end"] :]
38
+ return out
39
+
40
+
41
+ def predict(text: str, session, tokenizer, config, min_score: float):
42
+ encoded = tokenizer(text, return_offsets_mapping=True, return_tensors="np", truncation=True)
43
+ offsets = [tuple(item) for item in encoded["offset_mapping"][0].tolist()]
44
+ token_logits, start_logits, end_logits = run_onnx_all(session, encoded)
45
+ token_scores = 1.0 / (1.0 + np.exp(-token_logits[0]))
46
+ start_scores = 1.0 / (1.0 + np.exp(-start_logits[0]))
47
+ end_scores = 1.0 / (1.0 + np.exp(-end_logits[0]))
48
+ label_names = label_names_from_config(config)
49
+ thresholds = token_label_thresholds_from_config(config, min_score)
50
+ extend_thresholds = token_extend_thresholds_from_config(config)
51
+ max_span_tokens = label_max_span_tokens_from_config(config)
52
+ min_nonspace_chars = label_min_nonspace_chars_from_config(config)
53
+ boundary_thresholds = boundary_label_thresholds_from_config(config)
54
+ spans = decode_token_presence_segments(
55
+ text,
56
+ offsets,
57
+ token_scores,
58
+ label_names,
59
+ min_score,
60
+ thresholds,
61
+ extend_thresholds,
62
+ max_span_tokens,
63
+ min_nonspace_chars,
64
+ boundary_thresholds,
65
+ start_scores=start_scores,
66
+ end_scores=end_scores,
67
+ )
68
+ for span in spans:
69
+ span["replacement"] = replacement(span["label"])
70
+ return spans
71
+
72
+
73
+ def main() -> None:
74
+ parser = argparse.ArgumentParser()
75
+ parser.add_argument("--model", required=True)
76
+ parser.add_argument("--text", required=True)
77
+ parser.add_argument("--min-score", type=float, default=0.5)
78
+ parser.add_argument("--json", action="store_true")
79
+ args = parser.parse_args()
80
+
81
+ session, tokenizer, config = load_onnx_session(args.model, onnx_file="model_quantized.onnx", onnx_subfolder="onnx")
82
+ spans = predict(args.text, session, tokenizer, config, args.min_score)
83
+ result = {
84
+ "model": args.model,
85
+ "backend": "onnx_token_span_q8",
86
+ "min_score": args.min_score,
87
+ "spans": spans,
88
+ "masked_text": mask_text(args.text, spans),
89
+ }
90
+ if args.json:
91
+ print(json.dumps(result, indent=2, ensure_ascii=False))
92
+ else:
93
+ print(result["masked_text"])
94
+
95
+
96
+ if __name__ == "__main__":
97
+ main()
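The equivalent invocation against the quantized artifact onnx/model_quantized.onnx, the backend recommended in qa_config.json; --model again points at the local release directory (illustrative path):

    python inference_mask_onnx.py \
      --model . \
      --text "My PPSN is 1234567TW, my Eircode is D02 X285, and my phone is 087 123 4567." \
      --json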
model.py ADDED
@@ -0,0 +1,75 @@
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ from dataclasses import dataclass
5
+ from typing import Optional
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ from transformers import AutoConfig, AutoModel, PreTrainedModel
10
+ from transformers.utils import ModelOutput
11
+
12
+
13
+ def hidden_size_from_config(config) -> int:
14
+ return int(getattr(config, "hidden_size", getattr(config, "dim")))
15
+
16
+
17
+ @dataclass
18
+ class MultilabelSpanOutput(ModelOutput):
19
+ loss: Optional[torch.Tensor] = None
20
+ start_logits: Optional[torch.Tensor] = None
21
+ end_logits: Optional[torch.Tensor] = None
22
+
23
+
24
+ class IrishCoreSpanHeadModel(PreTrainedModel):
25
+ config_class = AutoConfig
26
+ base_model_prefix = "encoder"
27
+
28
+ def __init__(self, config):
29
+ super().__init__(config)
30
+ num_span_labels = int(getattr(config, "num_span_labels"))
31
+ self.encoder = AutoModel.from_config(config)
32
+ hidden_size = hidden_size_from_config(config)
33
+ dropout = float(getattr(config, "seq_classif_dropout", getattr(config, "dropout", 0.1)))
34
+ self.dropout = nn.Dropout(dropout)
35
+ self.start_classifier = nn.Linear(hidden_size, num_span_labels)
36
+ self.end_classifier = nn.Linear(hidden_size, num_span_labels)
37
+ pos_weight = float(getattr(config, "span_positive_weight", 6.0))
38
+ self.register_buffer("loss_pos_weight", torch.full((num_span_labels,), pos_weight), persistent=False)
39
+ self.post_init()
40
+
41
+ def forward(
42
+ self,
43
+ input_ids=None,
44
+ attention_mask=None,
45
+ token_type_ids=None,
46
+ start_positions=None,
47
+ end_positions=None,
48
+ token_mask=None,
49
+ **kwargs,
50
+ ) -> MultilabelSpanOutput:
51
+ encoder_kwargs = {
52
+ "input_ids": input_ids,
53
+ "attention_mask": attention_mask,
54
+ **kwargs,
55
+ }
56
+ if token_type_ids is not None and getattr(self.config, "model_type", "") not in {"distilbert", "roberta"}:
57
+ encoder_kwargs["token_type_ids"] = token_type_ids
58
+ outputs = self.encoder(**encoder_kwargs)
59
+ hidden = self.dropout(outputs.last_hidden_state)
60
+ start_logits = self.start_classifier(hidden)
61
+ end_logits = self.end_classifier(hidden)
62
+
63
+ loss = None
64
+ if start_positions is not None and end_positions is not None:
65
+ if token_mask is None:
66
+ token_mask = attention_mask
67
+ mask = token_mask.float().unsqueeze(-1)
68
+ pos_weight = self.loss_pos_weight.to(start_logits.device)
69
+ bce = nn.BCEWithLogitsLoss(reduction="none", pos_weight=pos_weight)
70
+ start_loss = bce(start_logits, start_positions.float()) * mask
71
+ end_loss = bce(end_logits, end_positions.float()) * mask
72
+ denom = mask.sum().clamp_min(1.0) * start_logits.shape[-1]
73
+ loss = (start_loss.sum() + end_loss.sum()) / (2.0 * denom)
74
+
75
+ return MultilabelSpanOutput(loss=loss, start_logits=start_logits, end_logits=end_logits)
model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c2eed253ea812d1db41a9475aaf3f6c74e90efbcf38d4ec1fee65dad65a3e16d
3
+ size 539050252
multitask_model.py ADDED
@@ -0,0 +1,94 @@
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ from dataclasses import dataclass
5
+ from typing import Optional
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ from transformers import AutoConfig, AutoModel, PreTrainedModel
10
+ from transformers.utils import ModelOutput
11
+
12
+ try:
13
+ from .model import hidden_size_from_config
14
+ except ImportError:
15
+ from model import hidden_size_from_config
16
+
17
+
18
+ @dataclass
19
+ class MultitaskSpanOutput(ModelOutput):
20
+ loss: Optional[torch.Tensor] = None
21
+ token_logits: Optional[torch.Tensor] = None
22
+ start_logits: Optional[torch.Tensor] = None
23
+ end_logits: Optional[torch.Tensor] = None
24
+
25
+
26
+ class IrishCoreTokenSpanModel(PreTrainedModel):
27
+ config_class = AutoConfig
28
+ base_model_prefix = "encoder"
29
+
30
+ def __init__(self, config):
31
+ super().__init__(config)
32
+ num_span_labels = int(getattr(config, "num_span_labels"))
33
+ self.encoder = AutoModel.from_config(config)
34
+ hidden_size = hidden_size_from_config(config)
35
+ dropout = float(getattr(config, "seq_classif_dropout", getattr(config, "dropout", 0.1)))
36
+ self.dropout = nn.Dropout(dropout)
37
+ self.token_classifier = nn.Linear(hidden_size, num_span_labels)
38
+ self.start_classifier = nn.Linear(hidden_size, num_span_labels)
39
+ self.end_classifier = nn.Linear(hidden_size, num_span_labels)
40
+ boundary_pos_weight = float(getattr(config, "span_positive_weight", 6.0))
41
+ presence_pos_weight = float(getattr(config, "token_positive_weight", 4.0))
42
+ self.register_buffer("boundary_pos_weight", torch.full((num_span_labels,), boundary_pos_weight), persistent=False)
43
+ self.register_buffer("presence_pos_weight", torch.full((num_span_labels,), presence_pos_weight), persistent=False)
44
+ self.post_init()
45
+
46
+ def forward(
47
+ self,
48
+ input_ids=None,
49
+ attention_mask=None,
50
+ token_type_ids=None,
51
+ token_labels=None,
52
+ start_positions=None,
53
+ end_positions=None,
54
+ token_mask=None,
55
+ **kwargs,
56
+ ) -> MultitaskSpanOutput:
57
+ encoder_kwargs = {
58
+ "input_ids": input_ids,
59
+ "attention_mask": attention_mask,
60
+ **kwargs,
61
+ }
62
+ if token_type_ids is not None and getattr(self.config, "model_type", "") not in {"distilbert", "roberta"}:
63
+ encoder_kwargs["token_type_ids"] = token_type_ids
64
+ outputs = self.encoder(**encoder_kwargs)
65
+ hidden = self.dropout(outputs.last_hidden_state)
66
+ token_logits = self.token_classifier(hidden)
67
+ start_logits = self.start_classifier(hidden)
68
+ end_logits = self.end_classifier(hidden)
69
+
70
+ loss = None
71
+ if token_labels is not None and start_positions is not None and end_positions is not None:
72
+ if token_mask is None:
73
+ token_mask = attention_mask
74
+ mask = token_mask.float().unsqueeze(-1)
75
+ boundary_pos_weight = self.boundary_pos_weight.to(token_logits.device)
76
+ presence_pos_weight = self.presence_pos_weight.to(token_logits.device)
77
+ bce_boundary = nn.BCEWithLogitsLoss(reduction="none", pos_weight=boundary_pos_weight)
78
+ bce_presence = nn.BCEWithLogitsLoss(reduction="none", pos_weight=presence_pos_weight)
79
+ token_loss = bce_presence(token_logits, token_labels.float()) * mask
80
+ start_loss = bce_boundary(start_logits, start_positions.float()) * mask
81
+ end_loss = bce_boundary(end_logits, end_positions.float()) * mask
82
+ denom = mask.sum().clamp_min(1.0) * token_logits.shape[-1]
83
+ token_loss = token_loss.sum() / denom
84
+ boundary_loss = (start_loss.sum() + end_loss.sum()) / (2.0 * denom)
85
+ token_weight = float(getattr(self.config, "token_presence_weight", 1.0))
86
+ boundary_weight = float(getattr(self.config, "boundary_loss_weight", 1.0))
87
+ loss = token_weight * token_loss + boundary_weight * boundary_loss
88
+
89
+ return MultitaskSpanOutput(
90
+ loss=loss,
91
+ token_logits=token_logits,
92
+ start_logits=start_logits,
93
+ end_logits=end_logits,
94
+ )
onnx/config.json ADDED
@@ -0,0 +1,333 @@
1
+ {
2
+ "activation": "gelu",
3
+ "architectures": [
4
+ "IrishCoreTokenSpanModel"
5
+ ],
6
+ "attention_dropout": 0.1,
7
+ "boundary_label_thresholds": {
8
+ "ACCOUNT_NUMBER": 0.1,
9
+ "BANK_ROUTING_NUMBER": 0.25,
10
+ "CREDIT_DEBIT_CARD": 0.25,
11
+ "EMAIL": 0.2,
12
+ "FIRST_NAME": 0.1,
13
+ "LAST_NAME": 0.1,
14
+ "PASSPORT_NUMBER": 0.25,
15
+ "PHONE_NUMBER": 0.25,
16
+ "POSTCODE": 0.4,
17
+ "PPSN": 0.35,
18
+ "SWIFT_BIC": 0.25
19
+ },
20
+ "boundary_loss_weight": 1.0,
21
+ "dim": 768,
22
+ "dropout": 0.1,
23
+ "dtype": "float32",
24
+ "hidden_dim": 3072,
25
+ "id2label": {
26
+ "0": "O",
27
+ "1": "B-account_number",
28
+ "2": "B-age",
29
+ "3": "B-api_key",
30
+ "4": "B-bank_routing_number",
31
+ "5": "B-biometric_identifier",
32
+ "6": "B-blood_type",
33
+ "7": "B-certificate_license_number",
34
+ "8": "B-city",
35
+ "9": "B-company_name",
36
+ "10": "B-coordinate",
37
+ "11": "B-country",
38
+ "12": "B-county",
39
+ "13": "B-credit_debit_card",
40
+ "14": "B-customer_id",
41
+ "15": "B-cvv",
42
+ "16": "B-date",
43
+ "17": "B-date_of_birth",
44
+ "18": "B-date_time",
45
+ "19": "B-device_identifier",
46
+ "20": "B-education_level",
47
+ "21": "B-email",
48
+ "22": "B-employee_id",
49
+ "23": "B-employment_status",
50
+ "24": "B-fax_number",
51
+ "25": "B-first_name",
52
+ "26": "B-gender",
53
+ "27": "B-health_plan_beneficiary_number",
54
+ "28": "B-http_cookie",
55
+ "29": "B-ipv4",
56
+ "30": "B-ipv6",
57
+ "31": "B-language",
58
+ "32": "B-last_name",
59
+ "33": "B-license_plate",
60
+ "34": "B-mac_address",
61
+ "35": "B-medical_record_number",
62
+ "36": "B-occupation",
63
+ "37": "B-password",
64
+ "38": "B-phone_number",
65
+ "39": "B-pin",
66
+ "40": "B-political_view",
67
+ "41": "B-postcode",
68
+ "42": "B-race_ethnicity",
69
+ "43": "B-religious_belief",
70
+ "44": "B-sexuality",
71
+ "45": "B-ssn",
72
+ "46": "B-state",
73
+ "47": "B-street_address",
74
+ "48": "B-swift_bic",
75
+ "49": "B-tax_id",
76
+ "50": "B-time",
77
+ "51": "B-unique_id",
78
+ "52": "B-url",
79
+ "53": "B-user_name",
80
+ "54": "B-vehicle_identifier",
81
+ "55": "I-account_number",
82
+ "56": "I-api_key",
83
+ "57": "I-biometric_identifier",
84
+ "58": "I-blood_type",
85
+ "59": "I-certificate_license_number",
86
+ "60": "I-city",
87
+ "61": "I-company_name",
88
+ "62": "I-coordinate",
89
+ "63": "I-country",
90
+ "64": "I-county",
91
+ "65": "I-credit_debit_card",
92
+ "66": "I-customer_id",
93
+ "67": "I-date",
94
+ "68": "I-date_of_birth",
95
+ "69": "I-date_time",
96
+ "70": "I-device_identifier",
97
+ "71": "I-education_level",
98
+ "72": "I-email",
99
+ "73": "I-employee_id",
100
+ "74": "I-employment_status",
101
+ "75": "I-fax_number",
102
+ "76": "I-first_name",
103
+ "77": "I-gender",
104
+ "78": "I-health_plan_beneficiary_number",
105
+ "79": "I-http_cookie",
106
+ "80": "I-ipv4",
107
+ "81": "I-ipv6",
108
+ "82": "I-language",
109
+ "83": "I-last_name",
110
+ "84": "I-license_plate",
111
+ "85": "I-mac_address",
112
+ "86": "I-medical_record_number",
113
+ "87": "I-occupation",
114
+ "88": "I-password",
115
+ "89": "I-phone_number",
116
+ "90": "I-pin",
117
+ "91": "I-political_view",
118
+ "92": "I-postcode",
119
+ "93": "I-race_ethnicity",
120
+ "94": "I-religious_belief",
121
+ "95": "I-sexuality",
122
+ "96": "I-ssn",
123
+ "97": "I-state",
124
+ "98": "I-street_address",
125
+ "99": "I-swift_bic",
126
+ "100": "I-tax_id",
127
+ "101": "I-time",
128
+ "102": "I-unique_id",
129
+ "103": "I-url",
130
+ "104": "I-user_name",
131
+ "105": "I-vehicle_identifier",
132
+ "106": "B-PPSN",
133
+ "107": "I-PPSN",
134
+ "108": "B-PASSPORT_NUMBER",
135
+ "109": "I-PASSPORT_NUMBER",
136
+ "110": "I-bank_routing_number"
137
+ },
138
+ "initializer_range": 0.02,
139
+ "label2id": {
140
+ "B-PASSPORT_NUMBER": 108,
141
+ "B-PPSN": 106,
142
+ "B-account_number": 1,
143
+ "B-age": 2,
144
+ "B-api_key": 3,
145
+ "B-bank_routing_number": 4,
146
+ "B-biometric_identifier": 5,
147
+ "B-blood_type": 6,
148
+ "B-certificate_license_number": 7,
149
+ "B-city": 8,
150
+ "B-company_name": 9,
151
+ "B-coordinate": 10,
152
+ "B-country": 11,
153
+ "B-county": 12,
154
+ "B-credit_debit_card": 13,
155
+ "B-customer_id": 14,
156
+ "B-cvv": 15,
157
+ "B-date": 16,
158
+ "B-date_of_birth": 17,
159
+ "B-date_time": 18,
160
+ "B-device_identifier": 19,
161
+ "B-education_level": 20,
162
+ "B-email": 21,
163
+ "B-employee_id": 22,
164
+ "B-employment_status": 23,
165
+ "B-fax_number": 24,
166
+ "B-first_name": 25,
167
+ "B-gender": 26,
168
+ "B-health_plan_beneficiary_number": 27,
169
+ "B-http_cookie": 28,
170
+ "B-ipv4": 29,
171
+ "B-ipv6": 30,
172
+ "B-language": 31,
173
+ "B-last_name": 32,
174
+ "B-license_plate": 33,
175
+ "B-mac_address": 34,
176
+ "B-medical_record_number": 35,
177
+ "B-occupation": 36,
178
+ "B-password": 37,
179
+ "B-phone_number": 38,
180
+ "B-pin": 39,
181
+ "B-political_view": 40,
182
+ "B-postcode": 41,
183
+ "B-race_ethnicity": 42,
184
+ "B-religious_belief": 43,
185
+ "B-sexuality": 44,
186
+ "B-ssn": 45,
187
+ "B-state": 46,
188
+ "B-street_address": 47,
189
+ "B-swift_bic": 48,
190
+ "B-tax_id": 49,
191
+ "B-time": 50,
192
+ "B-unique_id": 51,
193
+ "B-url": 52,
194
+ "B-user_name": 53,
195
+ "B-vehicle_identifier": 54,
196
+ "I-PASSPORT_NUMBER": 109,
197
+ "I-PPSN": 107,
198
+ "I-account_number": 55,
199
+ "I-api_key": 56,
200
+ "I-bank_routing_number": 110,
201
+ "I-biometric_identifier": 57,
202
+ "I-blood_type": 58,
203
+ "I-certificate_license_number": 59,
204
+ "I-city": 60,
205
+ "I-company_name": 61,
206
+ "I-coordinate": 62,
207
+ "I-country": 63,
208
+ "I-county": 64,
209
+ "I-credit_debit_card": 65,
210
+ "I-customer_id": 66,
211
+ "I-date": 67,
212
+ "I-date_of_birth": 68,
213
+ "I-date_time": 69,
214
+ "I-device_identifier": 70,
215
+ "I-education_level": 71,
216
+ "I-email": 72,
217
+ "I-employee_id": 73,
218
+ "I-employment_status": 74,
219
+ "I-fax_number": 75,
220
+ "I-first_name": 76,
221
+ "I-gender": 77,
222
+ "I-health_plan_beneficiary_number": 78,
223
+ "I-http_cookie": 79,
224
+ "I-ipv4": 80,
225
+ "I-ipv6": 81,
226
+ "I-language": 82,
227
+ "I-last_name": 83,
228
+ "I-license_plate": 84,
229
+ "I-mac_address": 85,
230
+ "I-medical_record_number": 86,
231
+ "I-occupation": 87,
232
+ "I-password": 88,
233
+ "I-phone_number": 89,
234
+ "I-pin": 90,
235
+ "I-political_view": 91,
236
+ "I-postcode": 92,
237
+ "I-race_ethnicity": 93,
238
+ "I-religious_belief": 94,
239
+ "I-sexuality": 95,
240
+ "I-ssn": 96,
241
+ "I-state": 97,
242
+ "I-street_address": 98,
243
+ "I-swift_bic": 99,
244
+ "I-tax_id": 100,
245
+ "I-time": 101,
246
+ "I-unique_id": 102,
247
+ "I-url": 103,
248
+ "I-user_name": 104,
249
+ "I-vehicle_identifier": 105,
250
+ "O": 0
251
+ },
252
+ "max_position_embeddings": 512,
253
+ "model_type": "distilbert",
254
+ "n_heads": 12,
255
+ "n_layers": 6,
256
+ "num_span_labels": 11,
257
+ "output_past": true,
258
+ "pad_token_id": 0,
259
+ "qa_dropout": 0.1,
260
+ "seq_classif_dropout": 0.2,
261
+ "sinusoidal_pos_embds": false,
262
+ "span_label_max_span_tokens": {
263
+ "ACCOUNT_NUMBER": 19,
264
+ "BANK_ROUTING_NUMBER": 6,
265
+ "CREDIT_DEBIT_CARD": 13,
266
+ "EMAIL": 16,
267
+ "FIRST_NAME": 5,
268
+ "LAST_NAME": 8,
269
+ "PASSPORT_NUMBER": 9,
270
+ "PHONE_NUMBER": 10,
271
+ "POSTCODE": 8,
272
+ "PPSN": 9,
273
+ "SWIFT_BIC": 8
274
+ },
275
+ "span_label_names": [
276
+ "ACCOUNT_NUMBER",
277
+ "BANK_ROUTING_NUMBER",
278
+ "CREDIT_DEBIT_CARD",
279
+ "EMAIL",
280
+ "FIRST_NAME",
281
+ "LAST_NAME",
282
+ "PASSPORT_NUMBER",
283
+ "PHONE_NUMBER",
284
+ "POSTCODE",
285
+ "PPSN",
286
+ "SWIFT_BIC"
287
+ ],
288
+ "span_label_thresholds": {
289
+ "ACCOUNT_NUMBER": 0.5,
290
+ "BANK_ROUTING_NUMBER": 0.5,
291
+ "CREDIT_DEBIT_CARD": 0.5,
292
+ "EMAIL": 0.5,
293
+ "FIRST_NAME": 0.5,
294
+ "LAST_NAME": 0.5,
295
+ "PASSPORT_NUMBER": 0.5,
296
+ "PHONE_NUMBER": 0.5,
297
+ "POSTCODE": 0.5,
298
+ "PPSN": 0.5,
299
+ "SWIFT_BIC": 0.5
300
+ },
301
+ "span_positive_weight": 6.0,
302
+ "tie_weights_": true,
303
+ "token_extend_thresholds": {
304
+ "ACCOUNT_NUMBER": 0.08,
305
+ "BANK_ROUTING_NUMBER": 0.3,
306
+ "CREDIT_DEBIT_CARD": 0.3,
307
+ "EMAIL": 0.3,
308
+ "FIRST_NAME": 0.3,
309
+ "LAST_NAME": 0.3,
310
+ "PASSPORT_NUMBER": 0.3,
311
+ "PHONE_NUMBER": 0.15,
312
+ "POSTCODE": 0.3,
313
+ "PPSN": 0.3,
314
+ "SWIFT_BIC": 0.3
315
+ },
316
+ "token_label_thresholds": {
317
+ "ACCOUNT_NUMBER": 0.18,
318
+ "BANK_ROUTING_NUMBER": 0.8,
319
+ "CREDIT_DEBIT_CARD": 0.8,
320
+ "EMAIL": 0.95,
321
+ "FIRST_NAME": 0.3,
322
+ "LAST_NAME": 0.4,
323
+ "PASSPORT_NUMBER": 0.8,
324
+ "PHONE_NUMBER": 0.65,
325
+ "POSTCODE": 0.9,
326
+ "PPSN": 0.7,
327
+ "SWIFT_BIC": 0.8
328
+ },
329
+ "token_positive_weight": 4.0,
330
+ "token_presence_weight": 1.0,
331
+ "transformers_version": "4.57.6",
332
+ "vocab_size": 119547
333
+ }
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a8c36363a0e7de6ba3b5b71302f7b1b4c924377be8b559bae89de74a10f85917
3
+ size 539156462
onnx/model.preprocessed.onnx ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3377ea0a3cb3dffecd9d1987278478fbaa9dc3c0efafae8897a1e10e83e43ae
3
+ size 539167909
onnx/model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6cea036b097cf9e6fff66489dd58813673d422a497462e2412b48aee3a4e4bf5
3
+ size 411951412
onnx/onnx_export.json ADDED
@@ -0,0 +1,12 @@
1
+ {
2
+ "source_model": "models/irishcore-diffmask-135m-rc3p40-rc4blend",
3
+ "onnx_path": "models/irishcore-diffmask-135m-rc3p40-rc4blend_onnx_q8/onnx/model.onnx",
4
+ "task": "multitask-token-span-extraction",
5
+ "opset": 18,
6
+ "max_length": 256,
7
+ "output_names": [
8
+ "token_logits",
9
+ "start_logits",
10
+ "end_logits"
11
+ ]
12
+ }
onnx/quantization.json ADDED
@@ -0,0 +1,26 @@
1
+ {
2
+ "source_dir": "models/irishcore-diffmask-135m-rc3p40-rc4blend_onnx_q8/onnx",
3
+ "input_model": "models/irishcore-diffmask-135m-rc3p40-rc4blend_onnx_q8_tmp/model.onnx",
4
+ "preprocessed_input_model": "models/irishcore-diffmask-135m-rc3p40-rc4blend_onnx_q8_tmp/model.preprocessed.onnx",
5
+ "output_model": "models/irishcore-diffmask-135m-rc3p40-rc4blend_onnx_q8_tmp/model_quantized.onnx",
6
+ "weight_type": "qint8",
7
+ "per_channel": true,
8
+ "reduce_range": false,
9
+ "preprocess_applied": true,
10
+ "op_types": [
11
+ "MatMul",
12
+ "Gemm",
13
+ "Attention"
14
+ ],
15
+ "copied_assets": [
16
+ "models/irishcore-diffmask-135m-rc3p40-rc4blend_onnx_q8/onnx/model.onnx",
17
+ "onnx_export.json",
18
+ "config.json",
19
+ "special_tokens_map.json",
20
+ "tokenizer.json",
21
+ "tokenizer_config.json",
22
+ "vocab.txt"
23
+ ],
24
+ "format": "onnx_dynamic_quantized",
25
+ "task": "token-classification"
26
+ }
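The flags recorded above map onto onnxruntime's dynamic-quantization API. A hedged sketch of how an equivalent q8 artifact could be produced from onnx/model.onnx (an assumed workflow for illustration; the exact export tooling is not part of this commit):

    from onnxruntime.quantization import QuantType, quantize_dynamic
    from onnxruntime.quantization.shape_inference import quant_pre_process

    # "preprocess_applied": true -> shape-inference/optimization preprocessing pass
    quant_pre_process("onnx/model.onnx", "onnx/model.preprocessed.onnx")

    # "weight_type": "qint8", "per_channel": true, "reduce_range": false
    quantize_dynamic(
        "onnx/model.preprocessed.onnx",
        "onnx/model_quantized.onnx",
        per_channel=True,
        reduce_range=False,
        weight_type=QuantType.QInt8,
        op_types_to_quantize=["MatMul", "Gemm", "Attention"],
    )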
onnx/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
1
+ {
2
+ "cls_token": {
3
+ "content": "[CLS]",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "mask_token": {
10
+ "content": "[MASK]",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "[PAD]",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "sep_token": {
24
+ "content": "[SEP]",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "unk_token": {
31
+ "content": "[UNK]",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ }
37
+ }
onnx/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
onnx/tokenizer_config.json ADDED
@@ -0,0 +1,64 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "100": {
12
+ "content": "[UNK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "101": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "102": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "103": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": false,
45
+ "cls_token": "[CLS]",
46
+ "do_lower_case": false,
47
+ "extra_special_tokens": {},
48
+ "fix_mistral_regex": true,
49
+ "mask_token": "[MASK]",
50
+ "max_length": 512,
51
+ "model_max_length": 512,
52
+ "pad_to_multiple_of": null,
53
+ "pad_token": "[PAD]",
54
+ "pad_token_type_id": 0,
55
+ "padding_side": "right",
56
+ "sep_token": "[SEP]",
57
+ "stride": 0,
58
+ "strip_accents": null,
59
+ "tokenize_chinese_chars": true,
60
+ "tokenizer_class": "DistilBertTokenizer",
61
+ "truncation_side": "right",
62
+ "truncation_strategy": "longest_first",
63
+ "unk_token": "[UNK]"
64
+ }
onnx/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,17 @@
1
+ [project]
2
+ name = "irishcore-diffmask"
3
+ version = "0.1.0"
4
+ description = "IrishCore DiffMask token-span release with dynamic q8 ONNX export"
5
+ requires-python = ">=3.10"
6
+ readme = "README.md"
7
+ license = { text = "Apache-2.0" }
8
+ dependencies = [
9
+ "transformers>=4.41.0",
10
+ "torch",
11
+ "numpy>=1.26.0",
12
+ "onnxruntime>=1.20.0",
13
+ "huggingface_hub>=0.36.0",
14
+ ]
15
+
16
+ [tool.uv]
17
+ package = false
qa_config.json ADDED
@@ -0,0 +1,16 @@
1
+ {
2
+ "release": "IrishCore-DiffMask-135M-v1-rc1",
3
+ "repo_id": "temsa/IrishCore-DiffMask-135M-v1-rc1",
4
+ "decoder": "raw_only_diffmask_token_span",
5
+ "diffusion_style_training": true,
6
+ "runtime_diffusion": false,
7
+ "min_score": 0.5,
8
+ "recommended_backend": "onnx_q8_cpu",
9
+ "onnx_file": "onnx/model_quantized.onnx",
10
+ "full_example": "My PPSN is 1234567TW, my Eircode is D02 X285, and my phone is 087 123 4567.",
11
+ "notes": [
12
+ "No scanner or validator layer is required for release behavior.",
13
+ "Use the bundled inference scripts or import common.decode_token_presence_segments.",
14
+ "The bundled scripts mask with [PII:LABEL] placeholders."
15
+ ]
16
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
1
+ {
2
+ "cls_token": {
3
+ "content": "[CLS]",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "mask_token": {
10
+ "content": "[MASK]",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "[PAD]",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "sep_token": {
24
+ "content": "[SEP]",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "unk_token": {
31
+ "content": "[UNK]",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ }
37
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,64 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "100": {
12
+ "content": "[UNK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "101": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "102": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "103": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": false,
45
+ "cls_token": "[CLS]",
46
+ "do_lower_case": false,
47
+ "extra_special_tokens": {},
48
+ "fix_mistral_regex": true,
49
+ "mask_token": "[MASK]",
50
+ "max_length": 512,
51
+ "model_max_length": 512,
52
+ "pad_to_multiple_of": null,
53
+ "pad_token": "[PAD]",
54
+ "pad_token_type_id": 0,
55
+ "padding_side": "right",
56
+ "sep_token": "[SEP]",
57
+ "stride": 0,
58
+ "strip_accents": null,
59
+ "tokenize_chinese_chars": true,
60
+ "tokenizer_class": "DistilBertTokenizer",
61
+ "truncation_side": "right",
62
+ "truncation_strategy": "longest_first",
63
+ "unk_token": "[UNK]"
64
+ }
training_sources.json ADDED
@@ -0,0 +1,86 @@
1
+ {
2
+ "release": "IrishCore-DiffMask-135M-v1-rc1",
3
+ "base_model": "OpenMed/OpenMed-PII-mLiteClinical-Base-135M-v1",
4
+ "public_references": {
5
+ "rc5": "temsa/OpenMed-mLiteClinical-IrishCorePII-135M-v2-rc5",
6
+ "rc8": "temsa/OpenMed-mLiteClinical-IrishCorePII-135M-v2-rc8"
7
+ },
8
+ "task": "Irish core PII detection and masking in English and Irish Gaelic",
9
+ "coverage": [
10
+ "PPSN",
11
+ "ACCOUNT_NUMBER",
12
+ "BANK_ROUTING_NUMBER",
13
+ "CREDIT_DEBIT_CARD",
14
+ "PASSPORT_NUMBER",
15
+ "POSTCODE",
16
+ "PHONE_NUMBER",
17
+ "EMAIL",
18
+ "FIRST_NAME",
19
+ "LAST_NAME",
20
+ "SWIFT_BIC"
21
+ ],
22
+ "architecture": {
23
+ "family": "DistilBERT-size token-span extractor",
24
+ "diffusion_style_training": true,
25
+ "runtime_diffusion": false,
26
+ "scanner_free": true,
27
+ "validator_free": true,
28
+ "heads": [
29
+ "token_presence_head",
30
+ "typed_start_boundary_head",
31
+ "typed_end_boundary_head"
32
+ ]
33
+ },
34
+ "training_data": {
35
+ "published": [
36
+ "temsa/OpenMed-Irish-CorePII-TrainMix-v1",
37
+ "temsa/OpenMed-Irish-PPSN-Eircode-Spec-v1",
38
+ "joelniklaus/mapa",
39
+ "gretelai/synthetic_pii_finance_multilingual"
40
+ ],
41
+ "local_synthetic_hardening_sets": [
42
+ "irish_dllm_hardening_v1",
43
+ "dllm_gap_patch_v1",
44
+ "dllm_gap_patch_v2",
45
+ "dllm_gap_patch_v3",
46
+ "irish_core_diffmask_v2_mix",
47
+ "irish_core_diffmask_v3_mix",
48
+ "irish_core_diffmask_v4_mix"
49
+ ],
50
+ "selection_note": "The published checkpoint was selected from multiple continuation and interpolation runs to balance Irish core, multilingual PPSN, and hardening performance."
51
+ },
52
+ "training_recipe": {
53
+ "noise_schedule_family": "linear masked denoising schedule",
54
+ "runtime_diffusion": false,
55
+ "train_time_diffusion_steps": 4,
56
+ "start_noise_fraction": 0.65,
57
+ "end_noise_fraction": 0.05,
58
+ "loss": "average BCE losses over token presence and typed boundaries across noised passes"
59
+ },
60
+ "references": [
61
+ {
62
+ "title": "BERT",
63
+ "url": "https://arxiv.org/abs/1810.04805"
64
+ },
65
+ {
66
+ "title": "DistilBERT",
67
+ "url": "https://arxiv.org/abs/1910.01108"
68
+ },
69
+ {
70
+ "title": "Boundary Smoothing for Named Entity Recognition",
71
+ "url": "https://aclanthology.org/2022.acl-long.490/"
72
+ },
73
+ {
74
+ "title": "SPANNER: Named Entity Re-/Recognition as Span Prediction",
75
+ "url": "https://aclanthology.org/2021.acl-long.558/"
76
+ },
77
+ {
78
+ "title": "LLaDA 2.0: Scaling Up Diffusion Language Models to 100B",
79
+ "url": "https://arxiv.org/abs/2512.15745"
80
+ },
81
+ {
82
+ "title": "Scaling Diffusion Language Models via Adaptation from Autoregressive Models",
83
+ "url": "https://arxiv.org/abs/2410.17891"
84
+ }
85
+ ]
86
+ }
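To make the training_recipe block concrete: a linear schedule from a 0.65 to a 0.05 mask fraction over 4 train-time passes gives per-pass mask fractions of 0.65, 0.45, 0.25 and 0.05. The sketch below only illustrates that schedule with numpy; it is not the release training code, and details such as which positions are eligible for masking are not specified here:

    import numpy as np

    steps = 4                          # "train_time_diffusion_steps": 4
    start_noise, end_noise = 0.65, 0.05
    mask_fractions = np.linspace(start_noise, end_noise, steps)
    print(mask_fractions)              # [0.65 0.45 0.25 0.05]

    rng = np.random.default_rng(0)
    input_ids = np.array([101, 11399, 10124, 169, 21296, 102])  # illustrative token ids
    mask_token_id = 103                # [MASK] per tokenizer_config.json
    for fraction in mask_fractions:
        noised = input_ids.copy()
        noised[rng.random(input_ids.shape) < fraction] = mask_token_id
        # each noised pass is then scored by the token-presence and typed boundary
        # heads, and the BCE losses are averaged across passes (per the recipe above)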
vocab.txt ADDED
The diff for this file is too large to render. See raw diff