Adding ONNX file of this model
Browse files

Beep boop I am the [ONNX export bot 🤖🏎️](https://huggingface.co/spaces/onnx/export). On behalf of [walex3232](https://huggingface.co/walex3232), I would like to add to this repository the model converted to ONNX.
What is ONNX? It stands for "Open Neural Network Exchange", and is the most commonly used open standard for machine learning interoperability. You can find out more at [onnx.ai](https://onnx.ai/)!
The exported ONNX model can then be consumed by various backends such as TensorRT or TVM, or simply be used in a few lines with 🤗 Optimum through ONNX Runtime — check out how [here](https://huggingface.co/docs/optimum/main/en/onnxruntime/usage_guides/models)!
- onnx/config.json +1 -1
- onnx/decoder_model.onnx +2 -2
- onnx/decoder_model_merged.onnx +1 -1
- onnx/decoder_with_past_model.onnx +2 -2
- onnx/encoder_model.onnx +2 -2
- onnx/generation_config.json +1 -1
- onnx/special_tokens_map.json +21 -3
- onnx/tokenizer.json +4 -2
onnx/config.json
CHANGED
|
@@ -54,7 +54,7 @@
|
|
| 54 |
"prefix": "translate English to Romanian: "
|
| 55 |
}
|
| 56 |
},
|
| 57 |
-
"transformers_version": "4.
|
| 58 |
"use_cache": true,
|
| 59 |
"vocab_size": 32128
|
| 60 |
}
|
|
|
|
| 54 |
"prefix": "translate English to Romanian: "
|
| 55 |
}
|
| 56 |
},
|
| 57 |
+
"transformers_version": "4.37.2",
|
| 58 |
"use_cache": true,
|
| 59 |
"vocab_size": 32128
|
| 60 |
}
|
onnx/decoder_model.onnx
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3ee7e59617e5da19129a9d2af10c5a3e3f2cd08cee067c6b93886395e80424a0
|
| 3 |
+
size 232468526
|
onnx/decoder_model_merged.onnx
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
size 232630636
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fd0dfaffc4cf1e534e5d84fcab8a7903e7a2876a54e69705fefaab1eb5edadd3
|
| 3 |
size 232630636
|
onnx/decoder_with_past_model.onnx
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cae6ffff9e2d5e5837ed04640cd7d2a81fa42bef6fcfa498ed5fcb55fc6c243c
|
| 3 |
+
size 219874666
|
onnx/encoder_model.onnx
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c01ca6add58782255a52459a6de06400a4b27f20be4c922c64fc1907a94013bc
|
| 3 |
+
size 141404303
|
onnx/generation_config.json
CHANGED
|
@@ -2,5 +2,5 @@
|
|
| 2 |
"decoder_start_token_id": 0,
|
| 3 |
"eos_token_id": 1,
|
| 4 |
"pad_token_id": 0,
|
| 5 |
-
"transformers_version": "4.
|
| 6 |
}
|
|
|
|
| 2 |
"decoder_start_token_id": 0,
|
| 3 |
"eos_token_id": 1,
|
| 4 |
"pad_token_id": 0,
|
| 5 |
+
"transformers_version": "4.37.2"
|
| 6 |
}
|
onnx/special_tokens_map.json
CHANGED
|
@@ -101,7 +101,25 @@
|
|
| 101 |
"<extra_id_98>",
|
| 102 |
"<extra_id_99>"
|
| 103 |
],
|
| 104 |
-
"eos_token":
|
| 105 |
-
|
| 106 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 107 |
}
|
|
|
|
| 101 |
"<extra_id_98>",
|
| 102 |
"<extra_id_99>"
|
| 103 |
],
|
| 104 |
+
"eos_token": {
|
| 105 |
+
"content": "</s>",
|
| 106 |
+
"lstrip": false,
|
| 107 |
+
"normalized": false,
|
| 108 |
+
"rstrip": false,
|
| 109 |
+
"single_word": false
|
| 110 |
+
},
|
| 111 |
+
"pad_token": {
|
| 112 |
+
"content": "<pad>",
|
| 113 |
+
"lstrip": false,
|
| 114 |
+
"normalized": false,
|
| 115 |
+
"rstrip": false,
|
| 116 |
+
"single_word": false
|
| 117 |
+
},
|
| 118 |
+
"unk_token": {
|
| 119 |
+
"content": "<unk>",
|
| 120 |
+
"lstrip": false,
|
| 121 |
+
"normalized": false,
|
| 122 |
+
"rstrip": false,
|
| 123 |
+
"single_word": false
|
| 124 |
+
}
|
| 125 |
}
|
onnx/tokenizer.json
CHANGED
|
@@ -949,7 +949,8 @@
|
|
| 949 |
{
|
| 950 |
"type": "Metaspace",
|
| 951 |
"replacement": "▁",
|
| 952 |
-
"add_prefix_space": true
|
|
|
|
| 953 |
}
|
| 954 |
]
|
| 955 |
},
|
|
@@ -1010,7 +1011,8 @@
|
|
| 1010 |
"decoder": {
|
| 1011 |
"type": "Metaspace",
|
| 1012 |
"replacement": "▁",
|
| 1013 |
-
"add_prefix_space": true
|
|
|
|
| 1014 |
},
|
| 1015 |
"model": {
|
| 1016 |
"type": "Unigram",
|
|
|
|
| 949 |
{
|
| 950 |
"type": "Metaspace",
|
| 951 |
"replacement": "▁",
|
| 952 |
+
"add_prefix_space": true,
|
| 953 |
+
"prepend_scheme": "always"
|
| 954 |
}
|
| 955 |
]
|
| 956 |
},
|
|
|
|
| 1011 |
"decoder": {
|
| 1012 |
"type": "Metaspace",
|
| 1013 |
"replacement": "▁",
|
| 1014 |
+
"add_prefix_space": true,
|
| 1015 |
+
"prepend_scheme": "always"
|
| 1016 |
},
|
| 1017 |
"model": {
|
| 1018 |
"type": "Unigram",
|