mihass committed · verified
Commit b6784df · 1 Parent(s): 3f25327

Adding ONNX file of this model

Beep boop, I am the [ONNX export bot 🤖🏎️](https://huggingface.co/spaces/onnx/export). On behalf of [mihass](https://huggingface.co/mihass), I would like to add the ONNX-converted model to this repository.

What is ONNX? It stands for "Open Neural Network Exchange", and is the most commonly used open standard for machine learning interoperability. You can find out more at [onnx.ai](https://onnx.ai/)!

The exported ONNX model can then be consumed by various backends such as TensorRT or TVM, or used in just a few lines with 🤗 Optimum through ONNX Runtime; check out how [here](https://huggingface.co/docs/optimum/main/en/onnxruntime/usage_guides/models)!
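
For example, loading this export through 🤗 Optimum is close to a one-class swap from plain Transformers. A minimal sketch, assuming the repository id is `mihass/microsoft-deberta-v3-large_ner_conll2003` and that the exported files live under `onnx/` as added in this commit:

```python
# Minimal sketch: run the ONNX export with 🤗 Optimum + ONNX Runtime.
# The repo id below is an assumption based on the model card name; adjust if needed.
from optimum.onnxruntime import ORTModelForTokenClassification
from transformers import AutoTokenizer, pipeline

repo_id = "mihass/microsoft-deberta-v3-large_ner_conll2003"  # assumed repo id
model = ORTModelForTokenClassification.from_pretrained(repo_id, subfolder="onnx")
tokenizer = AutoTokenizer.from_pretrained(repo_id, subfolder="onnx")

ner = pipeline(
    "token-classification",
    model=model,
    tokenizer=tokenizer,
    aggregation_strategy="simple",
)
print(ner("Barack Obama visited Microsoft headquarters in Redmond."))
```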

README.md CHANGED
@@ -2,6 +2,7 @@
 license: mit
 tags:
 - generated_from_trainer
+- onnx
 datasets:
 - conll2003
 metrics:
@@ -13,25 +14,25 @@ model-index:
 - name: microsoft-deberta-v3-large_ner_conll2003
   results:
   - task:
-      name: Token Classification
       type: token-classification
+      name: Token Classification
     dataset:
       name: conll2003
       type: conll2003
       args: conll2003
     metrics:
-    - name: Precision
-      type: precision
+    - type: precision
       value: 0.9667057052032793
+      name: Precision
-    - name: Recall
-      type: recall
+    - type: recall
       value: 0.972399865365197
+      name: Recall
-    - name: F1
-      type: f1
+    - type: f1
       value: 0.9695444248678582
+      name: F1
-    - name: Accuracy
-      type: accuracy
+    - type: accuracy
       value: 0.9945095595965889
+      name: Accuracy
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
onnx/config.json ADDED
@@ -0,0 +1,58 @@
+{
+  "_attn_implementation_autoset": true,
+  "architectures": [
+    "DebertaV2ForTokenClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "id2label": {
+    "0": "O",
+    "1": "B-PER",
+    "2": "I-PER",
+    "3": "B-ORG",
+    "4": "I-ORG",
+    "5": "B-LOC",
+    "6": "I-LOC",
+    "7": "B-MISC",
+    "8": "I-MISC"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "label2id": {
+    "B-LOC": 5,
+    "B-MISC": 7,
+    "B-ORG": 3,
+    "B-PER": 1,
+    "I-LOC": 6,
+    "I-MISC": 8,
+    "I-ORG": 4,
+    "I-PER": 2,
+    "O": 0
+  },
+  "layer_norm_eps": 1e-07,
+  "legacy": true,
+  "max_position_embeddings": 512,
+  "max_relative_positions": -1,
+  "model_type": "deberta-v2",
+  "norm_rel_ebd": "layer_norm",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "pad_token_id": 0,
+  "pooler_dropout": 0,
+  "pooler_hidden_act": "gelu",
+  "pooler_hidden_size": 1024,
+  "pos_att_type": [
+    "p2c",
+    "c2p"
+  ],
+  "position_biased_input": false,
+  "position_buckets": 256,
+  "relative_attention": true,
+  "share_att_key": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.51.3",
+  "type_vocab_size": 0,
+  "vocab_size": 128100
+}
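
The exported graph returns raw token-level logits, so the `id2label` map in this config is what turns argmax indices back into the CoNLL-2003 NER tags. A minimal sketch with plain `onnxruntime`, assuming the `onnx/` directory from this commit has been downloaded locally (paths are illustrative):

```python
# Minimal sketch: run the export with onnxruntime directly and decode labels
# via id2label from onnx/config.json. Assumes a local copy of the onnx/ folder.
import json
import onnxruntime as ort
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("onnx")            # local onnx/ directory
with open("onnx/config.json") as f:
    id2label = json.load(f)["id2label"]
session = ort.InferenceSession("onnx/model.onnx")

enc = tokenizer("George Washington lived in Virginia.", return_tensors="np")
input_names = {i.name for i in session.get_inputs()}
feeds = {k: v for k, v in enc.items() if k in input_names}   # keep only expected inputs

logits = session.run(None, feeds)[0]                          # shape (1, seq_len, 9)
pred_ids = logits.argmax(-1)[0]
tokens = tokenizer.convert_ids_to_tokens(enc["input_ids"][0].tolist())
print([(tok, id2label[str(i)]) for tok, i in zip(tokens, pred_ids)])
```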
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e84e0ba832040fbf613996f1052df6bb8689b0c480e33defdb54a68453543463
+size 1737905638
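
Only this Git LFS pointer is committed for `onnx/model.onnx`; the ~1.7 GB weight file itself lives in LFS storage and is resolved on download. A minimal sketch with `huggingface_hub` (the repo id is an assumption, as above):

```python
# Minimal sketch: resolve the LFS pointer above to the actual ONNX weights file.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="mihass/microsoft-deberta-v3-large_ner_conll2003",  # assumed repo id
    filename="onnx/model.onnx",
)
print(local_path)  # cached local path to the ~1.7 GB model.onnx
```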
onnx/special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+{
+  "bos_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "cls_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "[MASK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
onnx/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
onnx/tokenizer_config.json ADDED
@@ -0,0 +1,63 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "128000": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "[CLS]",
+  "clean_up_tokenization_spaces": false,
+  "cls_token": "[CLS]",
+  "do_lower_case": false,
+  "eos_token": "[SEP]",
+  "extra_special_tokens": {},
+  "mask_token": "[MASK]",
+  "max_length": 512,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "sp_model_kwargs": {},
+  "split_by_punct": false,
+  "stride": 0,
+  "tokenizer_class": "DebertaV2Tokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "[UNK]",
+  "vocab_type": "spm"
+}
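
One detail worth noting in this tokenizer config: `model_max_length` is the Transformers "unset" sentinel (`int(1e30)`), so the tokenizer will not cap inputs on its own even though `max_position_embeddings` is 512 in `onnx/config.json`. A minimal sketch of passing the cap explicitly (local `onnx/` directory assumed):

```python
# Minimal sketch: truncate explicitly to the model's 512-token limit, since
# model_max_length in tokenizer_config.json is the unset sentinel value.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("onnx")  # local onnx/ directory
enc = tokenizer(
    "Some very long document ... " * 200,
    truncation=True,
    max_length=512,   # matches max_position_embeddings in onnx/config.json
    return_tensors="np",
)
print(enc["input_ids"].shape)  # (1, 512)
```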