xamxte committed on
Commit
94aa0f0
·
0 Parent(s):

Super-squash branch 'main' using huggingface_hub

Browse files
Files changed (7) hide show
  1. .gitattributes +35 -0
  2. README.md +188 -0
  3. config.json +442 -0
  4. cwe_label_map.json +207 -0
  5. model.safetensors +3 -0
  6. tokenizer.json +0 -0
  7. tokenizer_config.json +16 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: cc-by-4.0
3
+ language:
4
+ - en
5
+ library_name: transformers
6
+ pipeline_tag: text-classification
7
+ tags:
8
+ - cybersecurity
9
+ - vulnerability
10
+ - cwe
11
+ - cve
12
+ - nvd
13
+ - roberta
14
+ base_model: FacebookAI/roberta-base
15
+ datasets:
16
+ - xamxte/cve-to-cwe
17
+ metrics:
18
+ - accuracy
19
+ - f1
20
+ model-index:
21
+ - name: cwe-classifier-roberta-base
22
+ results:
23
+ - task:
24
+ type: text-classification
25
+ name: CWE Classification
26
+ dataset:
27
+ name: cve-to-cwe (test split)
28
+ type: xamxte/cve-to-cwe
29
+ split: test
30
+ metrics:
31
+ - name: Top-1 Accuracy
32
+ type: accuracy
33
+ value: 0.8744
34
+ - name: Top-3 Accuracy
35
+ type: accuracy
36
+ value: 0.9467
37
+ - name: Macro F1
38
+ type: f1
39
+ value: 0.6071
40
+ - task:
41
+ type: text-classification
42
+ name: CWE Classification (CTI-Bench)
43
+ dataset:
44
+ name: CTI-Bench cti-rcm
45
+ type: xashru/cti-bench
46
+ metrics:
47
+ - name: Strict Top-1
48
+ type: accuracy
49
+ value: 0.756
50
+ - name: Hierarchy-aware Top-1
51
+ type: accuracy
52
+ value: 0.865
53
+ ---
54
+
55
+ # CWE Classifier (RoBERTa-base)
56
+
57
+ A fine-tuned RoBERTa-base model that maps CVE (Common Vulnerabilities and Exposures) descriptions to CWE (Common Weakness Enumeration) categories. 125M parameters, 205 CWE classes.
58
+
59
+ ## Performance
60
+
61
+ ### Internal Test Set (27,780 gold-standard samples)
62
+
63
+ | Metric | Score |
64
+ |--------|-------|
65
+ | Top-1 Accuracy | **87.4%** |
66
+ | Top-3 Accuracy | **94.7%** |
67
+ | Macro F1 | **0.607** |
68
+ | Weighted F1 | 0.872 |
69
+
70
+ ### CTI-Bench External Benchmark (NeurIPS 2024, zero training overlap)
71
+
72
+ | Benchmark | Strict Top-1 | Hierarchy-aware Top-1 |
73
+ |-----------|--------------|-----------------------|
74
+ | cti-rcm (2023-2024 CVEs) | 75.6% | **86.5%** |
75
+ | cti-rcm-2021 (2011-2021 CVEs) | 71.8% | **82.8%** |
76
+
77
+ ### Comparison on CTI-Bench cti-rcm (strict exact match)
78
+
79
+ All scores below use the official CTI-Bench evaluation protocol: strict exact CWE ID match.
80
+
81
+ | Model | Params | Type | Top-1 Accuracy | Source |
82
+ |-------|--------|------|---------------|--------|
83
+ | [Sec-Gemini v1](https://security.googleblog.com/2025/04/google-launches-sec-gemini-v1-new.html) (Google)* | — | closed | ~86% | Google Security Blog |
84
+ | [SecLM](https://security.googlecloudcommunity.com/community-blog-42/fueling-ai-innovation-in-secops-products-the-seclm-platform-and-sec-gemini-research-pipeline-3997) (Google)* | — | closed | ~85% | Google Cloud Blog |
85
+ | **This model** | **125M** | **open** | **75.6%** | — |
86
+ | [Foundation-Sec-8B-Reasoning](https://arxiv.org/abs/2601.21051) (Cisco) | 8B | open | 75.3% | arXiv 2601.21051 |
87
+ | [GPT-4](https://arxiv.org/abs/2406.07599) | ~1.7T | closed | 72.0% | CTI-Bench paper |
88
+ | [Foundation-Sec-8B](https://arxiv.org/abs/2504.21039) (Cisco) | 8B | open | 72.0% (±1.7%) | arXiv 2504.21039 |
89
+ | [WhiteRabbitNeo-V2-70B](https://arxiv.org/abs/2504.21039) | 70B | open | 71.1% | arXiv 2504.21039 |
90
+ | [Foundation-Sec-8B-Instruct](https://arxiv.org/abs/2601.21051) (Cisco) | 8B | open | 70.4% | arXiv 2601.21051 |
91
+ | [Llama-Primus](https://huggingface.co/trend-cybertron/Llama-Primus-Base) (Trend Micro) | 8B | open | 67.8% | HuggingFace |
92
+ | [GPT-3.5](https://arxiv.org/abs/2406.07599) | ~175B | closed | 67.2% | CTI-Bench paper |
93
+ | [Gemini 1.5](https://arxiv.org/abs/2406.07599) | — | closed | 66.6% | CTI-Bench paper |
94
+ | [LLaMA3-70B](https://arxiv.org/abs/2406.07599) | 70B | open | 65.9% | CTI-Bench paper |
95
+ | [LLaMA3-8B](https://arxiv.org/abs/2406.07599) | 8B | open | 44.7% | CTI-Bench paper |
96
+
97
+ *\*Sec-Gemini and SecLM scores are approximate, estimated from published comparison charts. Exact values were not reported.*
98
+
99
+ **Competitive with the best open-weight models** with 64× fewer parameters (125M vs 8B).
100
+
101
+ ### Hierarchy-aware evaluation (supplementary)
102
+
103
+ This model predicts specific child CWEs (e.g., CWE-121 Stack Buffer Overflow) while CTI-Bench ground truth often uses generic parent CWEs (e.g., CWE-119 Buffer Overflow). When parent↔child equivalences are counted as correct:
104
+
105
+ | Benchmark | Strict Top-1 | Hierarchy-aware Top-1 |
106
+ |-----------|--------------|-----------------------|
107
+ | cti-rcm (2023-2024 CVEs) | 75.6% | 86.5% (+10.9pp) |
108
+ | cti-rcm-2021 (2011-2021 CVEs) | 71.8% | 82.8% (+11.0pp) |
109
+
110
+ *Note: Other models in the table above were evaluated with strict matching only. Hierarchy-aware scores are not directly comparable and are shown separately for transparency.*
111
+
112
+ ## Usage
113
+
114
+ ```python
115
+ from transformers import pipeline
116
+
117
+ classifier = pipeline("text-classification", model="xamxte/cwe-classifier-roberta-base", top_k=3)
118
+
119
+ result = classifier("A SQL injection vulnerability in the login page allows remote attackers to execute arbitrary SQL commands via the username parameter.")
120
+ print(result)
121
+ # [[{'label': 'CWE-89', 'score': 0.95}, {'label': 'CWE-564', 'score': 0.02}, ...]]
122
+ ```
123
+
124
+ ### Manual inference
125
+
126
+ ```python
127
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
128
+ import torch
129
+ import json
130
+
131
+ model_name = "xamxte/cwe-classifier-roberta-base"
132
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
133
+ model = AutoModelForSequenceClassification.from_pretrained(model_name)
134
+
135
+ # Load label map
136
+ from huggingface_hub import hf_hub_download
137
+ label_map_path = hf_hub_download(repo_id=model_name, filename="cwe_label_map.json")
138
+ with open(label_map_path) as f:
139
+ label_map = json.load(f)
140
+ id_to_label = {v: k for k, v in label_map.items()}
141
+
142
+ # Predict
143
+ text = "CVE Description: A buffer overflow in the PNG parser allows remote code execution via crafted image files."
144
+ inputs = tokenizer(text, return_tensors="pt", max_length=384, truncation=True, padding=True)
145
+
146
+ with torch.no_grad():
147
+ logits = model(**inputs).logits
148
+
149
+ top3 = torch.topk(logits, 3)
150
+ for score, idx in zip(top3.values[0], top3.indices[0]):
151
+ print(f"{id_to_label[idx.item()]}: {score.item():.3f}")
152
+ ```
153
+
154
+ ## Training
155
+
156
+ - **Base model:** FacebookAI/roberta-base (125M params)
157
+ - **Dataset:** [xamxte/cve-to-cwe](https://huggingface.co/datasets/xamxte/cve-to-cwe) — 234,770 training samples with Claude Sonnet 4.6 refined labels
158
+ - **Training method:** Two-phase fine-tuning
159
+ - Phase 1: Freeze first 8/12 transformer layers, train classifier head (4 epochs, lr=1e-4)
160
+ - Phase 2: Unfreeze all layers, full fine-tuning (9 epochs, lr=2e-5)
161
+ - **Key hyperparameters:** max_length=384, batch_size=32, label_smoothing=0.1, cosine scheduler, bf16
162
+ - **Hardware:** NVIDIA RTX 5080 (16GB), ~4 hours total
163
+ - **Framework:** HuggingFace Transformers + PyTorch
164
+
165
+ ## Label Quality
166
+
167
+ Training labels were refined using Claude Sonnet 4.6 via the Anthropic Batch API. The test/validation sets contain only "gold standard" samples where NVD and Sonnet labels agree (73.1% agreement rate). See the [dataset card](https://huggingface.co/datasets/xamxte/cve-to-cwe) for details.
168
+
169
+ ## CWE Hierarchy
170
+
171
+ This model predicts **specific (child) CWE categories** where possible. For example, buffer overflows are classified as CWE-121 (Stack) or CWE-122 (Heap) rather than the generic CWE-119. This provides more actionable information for vulnerability triage, but means strict accuracy on benchmarks using parent CWEs appears lower than actual performance.
172
+
173
+ ## Limitations
174
+
175
+ - **205 CWE classes only**: Covers the most common CWEs in NVD. Rare CWEs not in the training set will be mapped to the closest known class.
176
+ - **English only**: Trained on English CVE descriptions from NVD.
177
+ - **Description-based**: Uses only the text description, not CVSS scores, CPE, or other metadata.
178
+ - **Single-label**: Predicts one CWE per CVE, though some vulnerabilities may involve multiple weakness types.
179
+
180
+ ## Citation
181
+
182
+ ```bibtex
183
+ @misc{cve_to_cwe_classifier_2025,
184
+ title={CWE Classifier (RoBERTa-base)},
185
+ year={2025},
186
+ url={https://huggingface.co/xamxte/cwe-classifier-roberta-base}
187
+ }
188
+ ```
config.json ADDED
@@ -0,0 +1,442 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_cross_attention": false,
3
+ "architectures": [
4
+ "RobertaForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "bos_token_id": 0,
8
+ "classifier_dropout": null,
9
+ "dtype": "float32",
10
+ "eos_token_id": 2,
11
+ "hidden_act": "gelu",
12
+ "hidden_dropout_prob": 0.1,
13
+ "hidden_size": 768,
14
+ "id2label": {
15
+ "0": "LABEL_0",
16
+ "1": "LABEL_1",
17
+ "2": "LABEL_2",
18
+ "3": "LABEL_3",
19
+ "4": "LABEL_4",
20
+ "5": "LABEL_5",
21
+ "6": "LABEL_6",
22
+ "7": "LABEL_7",
23
+ "8": "LABEL_8",
24
+ "9": "LABEL_9",
25
+ "10": "LABEL_10",
26
+ "11": "LABEL_11",
27
+ "12": "LABEL_12",
28
+ "13": "LABEL_13",
29
+ "14": "LABEL_14",
30
+ "15": "LABEL_15",
31
+ "16": "LABEL_16",
32
+ "17": "LABEL_17",
33
+ "18": "LABEL_18",
34
+ "19": "LABEL_19",
35
+ "20": "LABEL_20",
36
+ "21": "LABEL_21",
37
+ "22": "LABEL_22",
38
+ "23": "LABEL_23",
39
+ "24": "LABEL_24",
40
+ "25": "LABEL_25",
41
+ "26": "LABEL_26",
42
+ "27": "LABEL_27",
43
+ "28": "LABEL_28",
44
+ "29": "LABEL_29",
45
+ "30": "LABEL_30",
46
+ "31": "LABEL_31",
47
+ "32": "LABEL_32",
48
+ "33": "LABEL_33",
49
+ "34": "LABEL_34",
50
+ "35": "LABEL_35",
51
+ "36": "LABEL_36",
52
+ "37": "LABEL_37",
53
+ "38": "LABEL_38",
54
+ "39": "LABEL_39",
55
+ "40": "LABEL_40",
56
+ "41": "LABEL_41",
57
+ "42": "LABEL_42",
58
+ "43": "LABEL_43",
59
+ "44": "LABEL_44",
60
+ "45": "LABEL_45",
61
+ "46": "LABEL_46",
62
+ "47": "LABEL_47",
63
+ "48": "LABEL_48",
64
+ "49": "LABEL_49",
65
+ "50": "LABEL_50",
66
+ "51": "LABEL_51",
67
+ "52": "LABEL_52",
68
+ "53": "LABEL_53",
69
+ "54": "LABEL_54",
70
+ "55": "LABEL_55",
71
+ "56": "LABEL_56",
72
+ "57": "LABEL_57",
73
+ "58": "LABEL_58",
74
+ "59": "LABEL_59",
75
+ "60": "LABEL_60",
76
+ "61": "LABEL_61",
77
+ "62": "LABEL_62",
78
+ "63": "LABEL_63",
79
+ "64": "LABEL_64",
80
+ "65": "LABEL_65",
81
+ "66": "LABEL_66",
82
+ "67": "LABEL_67",
83
+ "68": "LABEL_68",
84
+ "69": "LABEL_69",
85
+ "70": "LABEL_70",
86
+ "71": "LABEL_71",
87
+ "72": "LABEL_72",
88
+ "73": "LABEL_73",
89
+ "74": "LABEL_74",
90
+ "75": "LABEL_75",
91
+ "76": "LABEL_76",
92
+ "77": "LABEL_77",
93
+ "78": "LABEL_78",
94
+ "79": "LABEL_79",
95
+ "80": "LABEL_80",
96
+ "81": "LABEL_81",
97
+ "82": "LABEL_82",
98
+ "83": "LABEL_83",
99
+ "84": "LABEL_84",
100
+ "85": "LABEL_85",
101
+ "86": "LABEL_86",
102
+ "87": "LABEL_87",
103
+ "88": "LABEL_88",
104
+ "89": "LABEL_89",
105
+ "90": "LABEL_90",
106
+ "91": "LABEL_91",
107
+ "92": "LABEL_92",
108
+ "93": "LABEL_93",
109
+ "94": "LABEL_94",
110
+ "95": "LABEL_95",
111
+ "96": "LABEL_96",
112
+ "97": "LABEL_97",
113
+ "98": "LABEL_98",
114
+ "99": "LABEL_99",
115
+ "100": "LABEL_100",
116
+ "101": "LABEL_101",
117
+ "102": "LABEL_102",
118
+ "103": "LABEL_103",
119
+ "104": "LABEL_104",
120
+ "105": "LABEL_105",
121
+ "106": "LABEL_106",
122
+ "107": "LABEL_107",
123
+ "108": "LABEL_108",
124
+ "109": "LABEL_109",
125
+ "110": "LABEL_110",
126
+ "111": "LABEL_111",
127
+ "112": "LABEL_112",
128
+ "113": "LABEL_113",
129
+ "114": "LABEL_114",
130
+ "115": "LABEL_115",
131
+ "116": "LABEL_116",
132
+ "117": "LABEL_117",
133
+ "118": "LABEL_118",
134
+ "119": "LABEL_119",
135
+ "120": "LABEL_120",
136
+ "121": "LABEL_121",
137
+ "122": "LABEL_122",
138
+ "123": "LABEL_123",
139
+ "124": "LABEL_124",
140
+ "125": "LABEL_125",
141
+ "126": "LABEL_126",
142
+ "127": "LABEL_127",
143
+ "128": "LABEL_128",
144
+ "129": "LABEL_129",
145
+ "130": "LABEL_130",
146
+ "131": "LABEL_131",
147
+ "132": "LABEL_132",
148
+ "133": "LABEL_133",
149
+ "134": "LABEL_134",
150
+ "135": "LABEL_135",
151
+ "136": "LABEL_136",
152
+ "137": "LABEL_137",
153
+ "138": "LABEL_138",
154
+ "139": "LABEL_139",
155
+ "140": "LABEL_140",
156
+ "141": "LABEL_141",
157
+ "142": "LABEL_142",
158
+ "143": "LABEL_143",
159
+ "144": "LABEL_144",
160
+ "145": "LABEL_145",
161
+ "146": "LABEL_146",
162
+ "147": "LABEL_147",
163
+ "148": "LABEL_148",
164
+ "149": "LABEL_149",
165
+ "150": "LABEL_150",
166
+ "151": "LABEL_151",
167
+ "152": "LABEL_152",
168
+ "153": "LABEL_153",
169
+ "154": "LABEL_154",
170
+ "155": "LABEL_155",
171
+ "156": "LABEL_156",
172
+ "157": "LABEL_157",
173
+ "158": "LABEL_158",
174
+ "159": "LABEL_159",
175
+ "160": "LABEL_160",
176
+ "161": "LABEL_161",
177
+ "162": "LABEL_162",
178
+ "163": "LABEL_163",
179
+ "164": "LABEL_164",
180
+ "165": "LABEL_165",
181
+ "166": "LABEL_166",
182
+ "167": "LABEL_167",
183
+ "168": "LABEL_168",
184
+ "169": "LABEL_169",
185
+ "170": "LABEL_170",
186
+ "171": "LABEL_171",
187
+ "172": "LABEL_172",
188
+ "173": "LABEL_173",
189
+ "174": "LABEL_174",
190
+ "175": "LABEL_175",
191
+ "176": "LABEL_176",
192
+ "177": "LABEL_177",
193
+ "178": "LABEL_178",
194
+ "179": "LABEL_179",
195
+ "180": "LABEL_180",
196
+ "181": "LABEL_181",
197
+ "182": "LABEL_182",
198
+ "183": "LABEL_183",
199
+ "184": "LABEL_184",
200
+ "185": "LABEL_185",
201
+ "186": "LABEL_186",
202
+ "187": "LABEL_187",
203
+ "188": "LABEL_188",
204
+ "189": "LABEL_189",
205
+ "190": "LABEL_190",
206
+ "191": "LABEL_191",
207
+ "192": "LABEL_192",
208
+ "193": "LABEL_193",
209
+ "194": "LABEL_194",
210
+ "195": "LABEL_195",
211
+ "196": "LABEL_196",
212
+ "197": "LABEL_197",
213
+ "198": "LABEL_198",
214
+ "199": "LABEL_199",
215
+ "200": "LABEL_200",
216
+ "201": "LABEL_201",
217
+ "202": "LABEL_202",
218
+ "203": "LABEL_203",
219
+ "204": "LABEL_204"
220
+ },
221
+ "initializer_range": 0.02,
222
+ "intermediate_size": 3072,
223
+ "is_decoder": false,
224
+ "label2id": {
225
+ "LABEL_0": 0,
226
+ "LABEL_1": 1,
227
+ "LABEL_10": 10,
228
+ "LABEL_100": 100,
229
+ "LABEL_101": 101,
230
+ "LABEL_102": 102,
231
+ "LABEL_103": 103,
232
+ "LABEL_104": 104,
233
+ "LABEL_105": 105,
234
+ "LABEL_106": 106,
235
+ "LABEL_107": 107,
236
+ "LABEL_108": 108,
237
+ "LABEL_109": 109,
238
+ "LABEL_11": 11,
239
+ "LABEL_110": 110,
240
+ "LABEL_111": 111,
241
+ "LABEL_112": 112,
242
+ "LABEL_113": 113,
243
+ "LABEL_114": 114,
244
+ "LABEL_115": 115,
245
+ "LABEL_116": 116,
246
+ "LABEL_117": 117,
247
+ "LABEL_118": 118,
248
+ "LABEL_119": 119,
249
+ "LABEL_12": 12,
250
+ "LABEL_120": 120,
251
+ "LABEL_121": 121,
252
+ "LABEL_122": 122,
253
+ "LABEL_123": 123,
254
+ "LABEL_124": 124,
255
+ "LABEL_125": 125,
256
+ "LABEL_126": 126,
257
+ "LABEL_127": 127,
258
+ "LABEL_128": 128,
259
+ "LABEL_129": 129,
260
+ "LABEL_13": 13,
261
+ "LABEL_130": 130,
262
+ "LABEL_131": 131,
263
+ "LABEL_132": 132,
264
+ "LABEL_133": 133,
265
+ "LABEL_134": 134,
266
+ "LABEL_135": 135,
267
+ "LABEL_136": 136,
268
+ "LABEL_137": 137,
269
+ "LABEL_138": 138,
270
+ "LABEL_139": 139,
271
+ "LABEL_14": 14,
272
+ "LABEL_140": 140,
273
+ "LABEL_141": 141,
274
+ "LABEL_142": 142,
275
+ "LABEL_143": 143,
276
+ "LABEL_144": 144,
277
+ "LABEL_145": 145,
278
+ "LABEL_146": 146,
279
+ "LABEL_147": 147,
280
+ "LABEL_148": 148,
281
+ "LABEL_149": 149,
282
+ "LABEL_15": 15,
283
+ "LABEL_150": 150,
284
+ "LABEL_151": 151,
285
+ "LABEL_152": 152,
286
+ "LABEL_153": 153,
287
+ "LABEL_154": 154,
288
+ "LABEL_155": 155,
289
+ "LABEL_156": 156,
290
+ "LABEL_157": 157,
291
+ "LABEL_158": 158,
292
+ "LABEL_159": 159,
293
+ "LABEL_16": 16,
294
+ "LABEL_160": 160,
295
+ "LABEL_161": 161,
296
+ "LABEL_162": 162,
297
+ "LABEL_163": 163,
298
+ "LABEL_164": 164,
299
+ "LABEL_165": 165,
300
+ "LABEL_166": 166,
301
+ "LABEL_167": 167,
302
+ "LABEL_168": 168,
303
+ "LABEL_169": 169,
304
+ "LABEL_17": 17,
305
+ "LABEL_170": 170,
306
+ "LABEL_171": 171,
307
+ "LABEL_172": 172,
308
+ "LABEL_173": 173,
309
+ "LABEL_174": 174,
310
+ "LABEL_175": 175,
311
+ "LABEL_176": 176,
312
+ "LABEL_177": 177,
313
+ "LABEL_178": 178,
314
+ "LABEL_179": 179,
315
+ "LABEL_18": 18,
316
+ "LABEL_180": 180,
317
+ "LABEL_181": 181,
318
+ "LABEL_182": 182,
319
+ "LABEL_183": 183,
320
+ "LABEL_184": 184,
321
+ "LABEL_185": 185,
322
+ "LABEL_186": 186,
323
+ "LABEL_187": 187,
324
+ "LABEL_188": 188,
325
+ "LABEL_189": 189,
326
+ "LABEL_19": 19,
327
+ "LABEL_190": 190,
328
+ "LABEL_191": 191,
329
+ "LABEL_192": 192,
330
+ "LABEL_193": 193,
331
+ "LABEL_194": 194,
332
+ "LABEL_195": 195,
333
+ "LABEL_196": 196,
334
+ "LABEL_197": 197,
335
+ "LABEL_198": 198,
336
+ "LABEL_199": 199,
337
+ "LABEL_2": 2,
338
+ "LABEL_20": 20,
339
+ "LABEL_200": 200,
340
+ "LABEL_201": 201,
341
+ "LABEL_202": 202,
342
+ "LABEL_203": 203,
343
+ "LABEL_204": 204,
344
+ "LABEL_21": 21,
345
+ "LABEL_22": 22,
346
+ "LABEL_23": 23,
347
+ "LABEL_24": 24,
348
+ "LABEL_25": 25,
349
+ "LABEL_26": 26,
350
+ "LABEL_27": 27,
351
+ "LABEL_28": 28,
352
+ "LABEL_29": 29,
353
+ "LABEL_3": 3,
354
+ "LABEL_30": 30,
355
+ "LABEL_31": 31,
356
+ "LABEL_32": 32,
357
+ "LABEL_33": 33,
358
+ "LABEL_34": 34,
359
+ "LABEL_35": 35,
360
+ "LABEL_36": 36,
361
+ "LABEL_37": 37,
362
+ "LABEL_38": 38,
363
+ "LABEL_39": 39,
364
+ "LABEL_4": 4,
365
+ "LABEL_40": 40,
366
+ "LABEL_41": 41,
367
+ "LABEL_42": 42,
368
+ "LABEL_43": 43,
369
+ "LABEL_44": 44,
370
+ "LABEL_45": 45,
371
+ "LABEL_46": 46,
372
+ "LABEL_47": 47,
373
+ "LABEL_48": 48,
374
+ "LABEL_49": 49,
375
+ "LABEL_5": 5,
376
+ "LABEL_50": 50,
377
+ "LABEL_51": 51,
378
+ "LABEL_52": 52,
379
+ "LABEL_53": 53,
380
+ "LABEL_54": 54,
381
+ "LABEL_55": 55,
382
+ "LABEL_56": 56,
383
+ "LABEL_57": 57,
384
+ "LABEL_58": 58,
385
+ "LABEL_59": 59,
386
+ "LABEL_6": 6,
387
+ "LABEL_60": 60,
388
+ "LABEL_61": 61,
389
+ "LABEL_62": 62,
390
+ "LABEL_63": 63,
391
+ "LABEL_64": 64,
392
+ "LABEL_65": 65,
393
+ "LABEL_66": 66,
394
+ "LABEL_67": 67,
395
+ "LABEL_68": 68,
396
+ "LABEL_69": 69,
397
+ "LABEL_7": 7,
398
+ "LABEL_70": 70,
399
+ "LABEL_71": 71,
400
+ "LABEL_72": 72,
401
+ "LABEL_73": 73,
402
+ "LABEL_74": 74,
403
+ "LABEL_75": 75,
404
+ "LABEL_76": 76,
405
+ "LABEL_77": 77,
406
+ "LABEL_78": 78,
407
+ "LABEL_79": 79,
408
+ "LABEL_8": 8,
409
+ "LABEL_80": 80,
410
+ "LABEL_81": 81,
411
+ "LABEL_82": 82,
412
+ "LABEL_83": 83,
413
+ "LABEL_84": 84,
414
+ "LABEL_85": 85,
415
+ "LABEL_86": 86,
416
+ "LABEL_87": 87,
417
+ "LABEL_88": 88,
418
+ "LABEL_89": 89,
419
+ "LABEL_9": 9,
420
+ "LABEL_90": 90,
421
+ "LABEL_91": 91,
422
+ "LABEL_92": 92,
423
+ "LABEL_93": 93,
424
+ "LABEL_94": 94,
425
+ "LABEL_95": 95,
426
+ "LABEL_96": 96,
427
+ "LABEL_97": 97,
428
+ "LABEL_98": 98,
429
+ "LABEL_99": 99
430
+ },
431
+ "layer_norm_eps": 1e-05,
432
+ "max_position_embeddings": 514,
433
+ "model_type": "roberta",
434
+ "num_attention_heads": 12,
435
+ "num_hidden_layers": 12,
436
+ "pad_token_id": 1,
437
+ "tie_word_embeddings": true,
438
+ "transformers_version": "5.3.0",
439
+ "type_vocab_size": 1,
440
+ "use_cache": false,
441
+ "vocab_size": 50265
442
+ }
cwe_label_map.json ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "CWE-1021": 0,
3
+ "CWE-113": 1,
4
+ "CWE-116": 2,
5
+ "CWE-117": 3,
6
+ "CWE-1188": 4,
7
+ "CWE-119": 5,
8
+ "CWE-120": 6,
9
+ "CWE-121": 7,
10
+ "CWE-122": 8,
11
+ "CWE-1220": 9,
12
+ "CWE-1236": 10,
13
+ "CWE-125": 11,
14
+ "CWE-126": 12,
15
+ "CWE-1284": 13,
16
+ "CWE-1286": 14,
17
+ "CWE-1287": 15,
18
+ "CWE-129": 16,
19
+ "CWE-130": 17,
20
+ "CWE-131": 18,
21
+ "CWE-1321": 19,
22
+ "CWE-1333": 20,
23
+ "CWE-1336": 21,
24
+ "CWE-134": 22,
25
+ "CWE-1390": 23,
26
+ "CWE-1392": 24,
27
+ "CWE-16": 25,
28
+ "CWE-17": 26,
29
+ "CWE-184": 27,
30
+ "CWE-189": 28,
31
+ "CWE-19": 29,
32
+ "CWE-190": 30,
33
+ "CWE-191": 31,
34
+ "CWE-193": 32,
35
+ "CWE-20": 33,
36
+ "CWE-200": 34,
37
+ "CWE-201": 35,
38
+ "CWE-203": 36,
39
+ "CWE-204": 37,
40
+ "CWE-208": 38,
41
+ "CWE-209": 39,
42
+ "CWE-212": 40,
43
+ "CWE-22": 41,
44
+ "CWE-23": 42,
45
+ "CWE-24": 43,
46
+ "CWE-248": 44,
47
+ "CWE-250": 45,
48
+ "CWE-252": 46,
49
+ "CWE-254": 47,
50
+ "CWE-255": 48,
51
+ "CWE-256": 49,
52
+ "CWE-257": 50,
53
+ "CWE-259": 51,
54
+ "CWE-264": 52,
55
+ "CWE-266": 53,
56
+ "CWE-267": 54,
57
+ "CWE-269": 55,
58
+ "CWE-275": 56,
59
+ "CWE-276": 57,
60
+ "CWE-277": 58,
61
+ "CWE-280": 59,
62
+ "CWE-281": 60,
63
+ "CWE-284": 61,
64
+ "CWE-285": 62,
65
+ "CWE-287": 63,
66
+ "CWE-288": 64,
67
+ "CWE-29": 65,
68
+ "CWE-290": 66,
69
+ "CWE-294": 67,
70
+ "CWE-295": 68,
71
+ "CWE-300": 69,
72
+ "CWE-303": 70,
73
+ "CWE-305": 71,
74
+ "CWE-306": 72,
75
+ "CWE-307": 73,
76
+ "CWE-310": 74,
77
+ "CWE-311": 75,
78
+ "CWE-312": 76,
79
+ "CWE-319": 77,
80
+ "CWE-320": 78,
81
+ "CWE-321": 79,
82
+ "CWE-326": 80,
83
+ "CWE-327": 81,
84
+ "CWE-330": 82,
85
+ "CWE-331": 83,
86
+ "CWE-338": 84,
87
+ "CWE-345": 85,
88
+ "CWE-346": 86,
89
+ "CWE-347": 87,
90
+ "CWE-35": 88,
91
+ "CWE-352": 89,
92
+ "CWE-354": 90,
93
+ "CWE-358": 91,
94
+ "CWE-359": 92,
95
+ "CWE-36": 93,
96
+ "CWE-362": 94,
97
+ "CWE-367": 95,
98
+ "CWE-369": 96,
99
+ "CWE-377": 97,
100
+ "CWE-384": 98,
101
+ "CWE-399": 99,
102
+ "CWE-400": 100,
103
+ "CWE-401": 101,
104
+ "CWE-404": 102,
105
+ "CWE-407": 103,
106
+ "CWE-415": 104,
107
+ "CWE-416": 105,
108
+ "CWE-425": 106,
109
+ "CWE-426": 107,
110
+ "CWE-427": 108,
111
+ "CWE-428": 109,
112
+ "CWE-434": 110,
113
+ "CWE-436": 111,
114
+ "CWE-441": 112,
115
+ "CWE-444": 113,
116
+ "CWE-451": 114,
117
+ "CWE-457": 115,
118
+ "CWE-459": 116,
119
+ "CWE-472": 117,
120
+ "CWE-476": 118,
121
+ "CWE-489": 119,
122
+ "CWE-494": 120,
123
+ "CWE-497": 121,
124
+ "CWE-502": 122,
125
+ "CWE-506": 123,
126
+ "CWE-521": 124,
127
+ "CWE-522": 125,
128
+ "CWE-532": 126,
129
+ "CWE-538": 127,
130
+ "CWE-548": 128,
131
+ "CWE-552": 129,
132
+ "CWE-565": 130,
133
+ "CWE-59": 131,
134
+ "CWE-591": 132,
135
+ "CWE-598": 133,
136
+ "CWE-601": 134,
137
+ "CWE-602": 135,
138
+ "CWE-61": 136,
139
+ "CWE-610": 137,
140
+ "CWE-611": 138,
141
+ "CWE-613": 139,
142
+ "CWE-617": 140,
143
+ "CWE-620": 141,
144
+ "CWE-639": 142,
145
+ "CWE-640": 143,
146
+ "CWE-648": 144,
147
+ "CWE-665": 145,
148
+ "CWE-667": 146,
149
+ "CWE-668": 147,
150
+ "CWE-669": 148,
151
+ "CWE-670": 149,
152
+ "CWE-674": 150,
153
+ "CWE-680": 151,
154
+ "CWE-681": 152,
155
+ "CWE-682": 153,
156
+ "CWE-693": 154,
157
+ "CWE-697": 155,
158
+ "CWE-703": 156,
159
+ "CWE-704": 157,
160
+ "CWE-706": 158,
161
+ "CWE-707": 159,
162
+ "CWE-73": 160,
163
+ "CWE-732": 161,
164
+ "CWE-74": 162,
165
+ "CWE-749": 163,
166
+ "CWE-754": 164,
167
+ "CWE-755": 165,
168
+ "CWE-763": 166,
169
+ "CWE-77": 167,
170
+ "CWE-770": 168,
171
+ "CWE-772": 169,
172
+ "CWE-776": 170,
173
+ "CWE-78": 171,
174
+ "CWE-787": 172,
175
+ "CWE-788": 173,
176
+ "CWE-789": 174,
177
+ "CWE-79": 175,
178
+ "CWE-798": 176,
179
+ "CWE-80": 177,
180
+ "CWE-822": 178,
181
+ "CWE-823": 179,
182
+ "CWE-824": 180,
183
+ "CWE-829": 181,
184
+ "CWE-834": 182,
185
+ "CWE-835": 183,
186
+ "CWE-840": 184,
187
+ "CWE-843": 185,
188
+ "CWE-862": 186,
189
+ "CWE-863": 187,
190
+ "CWE-88": 188,
191
+ "CWE-89": 189,
192
+ "CWE-908": 190,
193
+ "CWE-909": 191,
194
+ "CWE-91": 192,
195
+ "CWE-912": 193,
196
+ "CWE-913": 194,
197
+ "CWE-916": 195,
198
+ "CWE-917": 196,
199
+ "CWE-918": 197,
200
+ "CWE-922": 198,
201
+ "CWE-926": 199,
202
+ "CWE-93": 200,
203
+ "CWE-94": 201,
204
+ "CWE-95": 202,
205
+ "CWE-98": 203,
206
+ "CWE-OTHER": 204
207
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c0cb92e88ac2fbe698ccc77ca23fecc4ad810f99307affa2fc3ac7b14a8344c
3
+ size 499237228
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "backend": "tokenizers",
4
+ "bos_token": "<s>",
5
+ "cls_token": "<s>",
6
+ "eos_token": "</s>",
7
+ "errors": "replace",
8
+ "is_local": false,
9
+ "mask_token": "<mask>",
10
+ "model_max_length": 512,
11
+ "pad_token": "<pad>",
12
+ "sep_token": "</s>",
13
+ "tokenizer_class": "RobertaTokenizer",
14
+ "trim_offsets": true,
15
+ "unk_token": "<unk>"
16
+ }