rafaelsandroni committed (verified) · Commit 57941d5 · 1 Parent(s): afdcd86

Add ONNX converted GLiNER models
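The exported graphs can be sanity-checked directly with onnxruntime; a minimal sketch, assuming the repo is checked out locally with the LFS files pulled (the quantized variant at onnx/model_quantized.onnx loads the same way):

```python
import onnxruntime as ort

# Open the exported graph and list the inputs it expects.
session = ort.InferenceSession(
    "onnx/model.onnx", providers=["CPUExecutionProvider"]
)
for inp in session.get_inputs():
    print(inp.name, inp.shape, inp.type)
```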
added_tokens.json CHANGED
@@ -1,6 +1,3 @@
 {
-  "<<ENT>>": 128002,
-  "<<SEP>>": 128003,
-  "[FLERT]": 128001,
   "[MASK]": 128000
 }
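Only [MASK] survives in added_tokens.json; gliner_config.json below still references <<ENT>> (class_token_index 128002), so the marker tokens presumably now live inside tokenizer.json itself (its diff is too large to render). A minimal check of the trimmed file:

```python
import json

# After this commit the file should carry only the [MASK] entry.
with open("added_tokens.json") as f:
    added_tokens = json.load(f)

assert added_tokens == {"[MASK]": 128000}
```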
gliner_config.json CHANGED
@@ -1,10 +1,8 @@
 {
   "class_token_index": 128002,
-  "decoder_mode": null,
   "dropout": 0.4,
   "embed_ent_token": true,
   "encoder_config": {
-    "_attn_implementation_autoset": true,
     "_name_or_path": "microsoft/deberta-v3-large",
     "add_cross_attention": false,
     "architectures": null,
@@ -17,6 +15,7 @@
     "decoder_start_token_id": null,
     "diversity_penalty": 0.0,
     "do_sample": false,
+    "dtype": null,
     "early_stopping": false,
     "encoder_no_repeat_ngram_size": 0,
     "eos_token_id": null,
@@ -86,7 +85,6 @@
     "tokenizer_class": null,
     "top_k": 50,
     "top_p": 1.0,
-    "torch_dtype": null,
     "torchscript": false,
     "type_vocab_size": 0,
     "typical_p": 1.0,
@@ -96,12 +94,9 @@
   "ent_token": "<<ENT>>",
   "eval_every": 5000,
   "fine_tune": true,
-  "full_decoder_context": true,
   "fuse_layers": false,
   "has_rnn": true,
   "hidden_size": 512,
-  "labels_decoder": null,
-  "labels_decoder_config": null,
   "labels_encoder": null,
   "labels_encoder_config": null,
   "lr_encoder": "1e-5",
@@ -123,7 +118,7 @@
   "span_mode": "markerV0",
   "subtoken_pooling": "first",
   "train_batch_size": 8,
-  "transformers_version": "4.51.0",
+  "transformers_version": "4.57.1",
   "vocab_size": 128004,
   "warmup_ratio": 3000,
   "words_splitter_type": "whitespace"
onnx/model.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7989184095f8e1ff5ccbf9c528bd02e0d103709cd83dad55f9c5f0e04a769f1a
-size 1784440836
+oid sha256:e3659f07e42e3732af9593fdab0fe0809efd58afe5f6fb4442023fed1c6b5086
+size 1784443906
onnx/model_quantized.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ae9497bc1e798d85a0e37320a76d167f56e8657018e8fa2fc006d5cee9de8e53
-size 653173154
+oid sha256:2777e8503a56616133521552de74ffcde9770a8355224ddf38dd08b3dfcb796e
+size 653177552
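Both .onnx entries are Git LFS pointers, so the diff records only the new oid and size. A small sketch for verifying a pulled file against the pointer's sha256 oid:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so multi-GB models aren't loaded into memory at once."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

# oid taken from the pointer above.
assert sha256_of("onnx/model.onnx") == (
    "e3659f07e42e3732af9593fdab0fe0809efd58afe5f6fb4442023fed1c6b5086"
)
```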
special_tokens_map.json CHANGED
@@ -1,46 +1,10 @@
 {
-  "bos_token": {
-    "content": "[CLS]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "cls_token": {
-    "content": "[CLS]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "[SEP]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "mask_token": {
-    "content": "[MASK]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "[PAD]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "sep_token": {
-    "content": "[SEP]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "bos_token": "[CLS]",
+  "cls_token": "[CLS]",
+  "eos_token": "[SEP]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
   "unk_token": {
     "content": "[UNK]",
     "lstrip": false,
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -39,30 +39,6 @@
       "rstrip": false,
       "single_word": false,
       "special": true
-    },
-    "128001": {
-      "content": "[FLERT]",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "128002": {
-      "content": "<<ENT>>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "128003": {
-      "content": "<<SEP>>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
     }
   },
   "bos_token": "[CLS]",
@@ -72,12 +48,8 @@
   "eos_token": "[SEP]",
   "extra_special_tokens": {},
   "mask_token": "[MASK]",
-  "max_length": null,
   "model_max_length": 1000000000000000019884624838656,
-  "pad_to_multiple_of": null,
   "pad_token": "[PAD]",
-  "pad_token_type_id": 0,
-  "padding_side": "right",
   "sep_token": "[SEP]",
   "sp_model_kwargs": {},
   "split_by_punct": false,