InfosysResponsibleAiToolKit commited on
Commit
b63cddc
·
1 Parent(s): 9e9a98f

model file

Browse files
Files changed (27) hide show
  1. models/dbertaInjection/config.json +0 -43
  2. models/dbertaInjection/model.safetensors +0 -3
  3. models/dbertaInjection/special_tokens_map.json +0 -9
  4. models/dbertaInjection/tokenizer.json +0 -0
  5. models/dbertaInjection/tokenizer_config.json +0 -16
  6. models/detoxify/config.json +0 -21
  7. models/detoxify/merges.txt +0 -0
  8. models/detoxify/tokenizer.json +0 -0
  9. models/detoxify/toxic_debiased-c7548aa0.ckpt +0 -3
  10. models/detoxify/vocab.json +0 -0
  11. models/multi-qa-mpnet-base-dot-v1/1_Pooling/config.json +0 -7
  12. models/multi-qa-mpnet-base-dot-v1/config.json +0 -23
  13. models/multi-qa-mpnet-base-dot-v1/config_sentence_transformers.json +0 -10
  14. models/multi-qa-mpnet-base-dot-v1/modules.json +0 -14
  15. models/multi-qa-mpnet-base-dot-v1/pytorch_model.bin +0 -3
  16. models/multi-qa-mpnet-base-dot-v1/sentence_bert_config.json +0 -4
  17. models/multi-qa-mpnet-base-dot-v1/special_tokens_map.json +0 -1
  18. models/multi-qa-mpnet-base-dot-v1/tokenizer.json +0 -0
  19. models/multi-qa-mpnet-base-dot-v1/tokenizer_config.json +0 -1
  20. models/multi-qa-mpnet-base-dot-v1/vocab.txt +0 -0
  21. models/restricted-dberta-base-zeroshot-v2/added_tokens.json +0 -3
  22. models/restricted-dberta-base-zeroshot-v2/config.json +0 -43
  23. models/restricted-dberta-base-zeroshot-v2/model.safetensors +0 -3
  24. models/restricted-dberta-base-zeroshot-v2/special_tokens_map.json +0 -15
  25. models/restricted-dberta-base-zeroshot-v2/spm.model +0 -3
  26. models/restricted-dberta-base-zeroshot-v2/tokenizer.json +0 -0
  27. models/restricted-dberta-base-zeroshot-v2/tokenizer_config.json +0 -58
models/dbertaInjection/config.json DELETED
@@ -1,43 +0,0 @@
1
- {
2
- "_name_or_path": "microsoft/deberta-v3-base",
3
- "architectures": [
4
- "DebertaV2ForSequenceClassification"
5
- ],
6
- "attention_probs_dropout_prob": 0.1,
7
- "hidden_act": "gelu",
8
- "hidden_dropout_prob": 0.1,
9
- "hidden_size": 768,
10
- "id2label": {
11
- "0": "LEGIT",
12
- "1": "INJECTION"
13
- },
14
- "initializer_range": 0.02,
15
- "intermediate_size": 3072,
16
- "label2id": {
17
- "INJECTION": 1,
18
- "LEGIT": 0
19
- },
20
- "layer_norm_eps": 1e-07,
21
- "max_position_embeddings": 512,
22
- "max_relative_positions": -1,
23
- "model_type": "deberta-v2",
24
- "norm_rel_ebd": "layer_norm",
25
- "num_attention_heads": 12,
26
- "num_hidden_layers": 12,
27
- "pad_token_id": 0,
28
- "pooler_dropout": 0,
29
- "pooler_hidden_act": "gelu",
30
- "pooler_hidden_size": 768,
31
- "pos_att_type": [
32
- "p2c",
33
- "c2p"
34
- ],
35
- "position_biased_input": false,
36
- "position_buckets": 256,
37
- "relative_attention": true,
38
- "share_att_key": true,
39
- "torch_dtype": "float32",
40
- "transformers_version": "4.29.1",
41
- "type_vocab_size": 0,
42
- "vocab_size": 128100
43
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/dbertaInjection/model.safetensors DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:16862b96671d118a79b8d3a1352a552913797f9fc3cc7db2e06a05002abfc084
3
- size 737723472
 
 
 
 
models/dbertaInjection/special_tokens_map.json DELETED
@@ -1,9 +0,0 @@
1
- {
2
- "bos_token": "[CLS]",
3
- "cls_token": "[CLS]",
4
- "eos_token": "[SEP]",
5
- "mask_token": "[MASK]",
6
- "pad_token": "[PAD]",
7
- "sep_token": "[SEP]",
8
- "unk_token": "[UNK]"
9
- }
 
 
 
 
 
 
 
 
 
 
models/dbertaInjection/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
models/dbertaInjection/tokenizer_config.json DELETED
@@ -1,16 +0,0 @@
1
- {
2
- "bos_token": "[CLS]",
3
- "clean_up_tokenization_spaces": true,
4
- "cls_token": "[CLS]",
5
- "do_lower_case": false,
6
- "eos_token": "[SEP]",
7
- "mask_token": "[MASK]",
8
- "model_max_length": 1000000000000000019884624838656,
9
- "pad_token": "[PAD]",
10
- "sep_token": "[SEP]",
11
- "sp_model_kwargs": {},
12
- "split_by_punct": false,
13
- "tokenizer_class": "DebertaV2Tokenizer",
14
- "unk_token": "[UNK]",
15
- "vocab_type": "spm"
16
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/detoxify/config.json DELETED
@@ -1,21 +0,0 @@
1
- {
2
- "architectures": [
3
- "RobertaForMaskedLM"
4
- ],
5
- "attention_probs_dropout_prob": 0.1,
6
- "bos_token_id": 0,
7
- "eos_token_id": 2,
8
- "hidden_act": "gelu",
9
- "hidden_dropout_prob": 0.1,
10
- "hidden_size": 768,
11
- "initializer_range": 0.02,
12
- "intermediate_size": 3072,
13
- "layer_norm_eps": 1e-05,
14
- "max_position_embeddings": 514,
15
- "model_type": "roberta",
16
- "num_attention_heads": 12,
17
- "num_hidden_layers": 12,
18
- "pad_token_id": 1,
19
- "type_vocab_size": 1,
20
- "vocab_size": 50265
21
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/detoxify/merges.txt DELETED
The diff for this file is too large to render. See raw diff
 
models/detoxify/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
models/detoxify/toxic_debiased-c7548aa0.ckpt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:c7548aa09468726af4cfdd0959912467784de4a076786913ada9f0f21b68f60d
3
- size 498707273
 
 
 
 
models/detoxify/vocab.json DELETED
The diff for this file is too large to render. See raw diff
 
models/multi-qa-mpnet-base-dot-v1/1_Pooling/config.json DELETED
@@ -1,7 +0,0 @@
1
- {
2
- "word_embedding_dimension": 768,
3
- "pooling_mode_cls_token": true,
4
- "pooling_mode_mean_tokens": false,
5
- "pooling_mode_max_tokens": false,
6
- "pooling_mode_mean_sqrt_len_tokens": false
7
- }
 
 
 
 
 
 
 
 
models/multi-qa-mpnet-base-dot-v1/config.json DELETED
@@ -1,23 +0,0 @@
1
- {
2
- "_name_or_path": "microsoft/mpnet-base",
3
- "architectures": [
4
- "MPNetForMaskedLM"
5
- ],
6
- "attention_probs_dropout_prob": 0.1,
7
- "bos_token_id": 0,
8
- "eos_token_id": 2,
9
- "hidden_act": "gelu",
10
- "hidden_dropout_prob": 0.1,
11
- "hidden_size": 768,
12
- "initializer_range": 0.02,
13
- "intermediate_size": 3072,
14
- "layer_norm_eps": 1e-05,
15
- "max_position_embeddings": 514,
16
- "model_type": "mpnet",
17
- "num_attention_heads": 12,
18
- "num_hidden_layers": 12,
19
- "pad_token_id": 1,
20
- "relative_attention_num_buckets": 32,
21
- "transformers_version": "4.8.2",
22
- "vocab_size": 30527
23
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/multi-qa-mpnet-base-dot-v1/config_sentence_transformers.json DELETED
@@ -1,10 +0,0 @@
1
- {
2
- "__version__": {
3
- "sentence_transformers": "3.0.0.dev0",
4
- "transformers": "4.41.0.dev0",
5
- "pytorch": "2.3.0+cu121"
6
- },
7
- "prompts": {},
8
- "default_prompt_name": null,
9
- "similarity_fn_name": "dot"
10
- }
 
 
 
 
 
 
 
 
 
 
 
models/multi-qa-mpnet-base-dot-v1/modules.json DELETED
@@ -1,14 +0,0 @@
1
- [
2
- {
3
- "idx": 0,
4
- "name": "0",
5
- "path": "",
6
- "type": "sentence_transformers.models.Transformer"
7
- },
8
- {
9
- "idx": 1,
10
- "name": "1",
11
- "path": "1_Pooling",
12
- "type": "sentence_transformers.models.Pooling"
13
- }
14
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/multi-qa-mpnet-base-dot-v1/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:9e1e76b7a067f72e49c7f571cd8e811f7a1567bec49f17e5eaaea899e7bc2c9e
3
- size 438011953
 
 
 
 
models/multi-qa-mpnet-base-dot-v1/sentence_bert_config.json DELETED
@@ -1,4 +0,0 @@
1
- {
2
- "max_seq_length": 512,
3
- "do_lower_case": false
4
- }
 
 
 
 
 
models/multi-qa-mpnet-base-dot-v1/special_tokens_map.json DELETED
@@ -1 +0,0 @@
1
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
 
 
models/multi-qa-mpnet-base-dot-v1/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
models/multi-qa-mpnet-base-dot-v1/tokenizer_config.json DELETED
@@ -1 +0,0 @@
1
- {"do_lower_case": true, "bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "[UNK]", "pad_token": "<pad>", "mask_token": "<mask>", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "microsoft/mpnet-base", "tokenizer_class": "MPNetTokenizer"}
 
 
models/multi-qa-mpnet-base-dot-v1/vocab.txt DELETED
The diff for this file is too large to render. See raw diff
 
models/restricted-dberta-base-zeroshot-v2/added_tokens.json DELETED
@@ -1,3 +0,0 @@
1
- {
2
- "[MASK]": 128000
3
- }
 
 
 
 
models/restricted-dberta-base-zeroshot-v2/config.json DELETED
@@ -1,43 +0,0 @@
1
- {
2
- "_name_or_path": "microsoft/deberta-v3-base",
3
- "architectures": [
4
- "DebertaV2ForSequenceClassification"
5
- ],
6
- "attention_probs_dropout_prob": 0.1,
7
- "hidden_act": "gelu",
8
- "hidden_dropout_prob": 0.1,
9
- "hidden_size": 768,
10
- "id2label": {
11
- "0": "entailment",
12
- "1": "not_entailment"
13
- },
14
- "initializer_range": 0.02,
15
- "intermediate_size": 3072,
16
- "label2id": {
17
- "entailment": 0,
18
- "not_entailment": 1
19
- },
20
- "layer_norm_eps": 1e-07,
21
- "max_position_embeddings": 512,
22
- "max_relative_positions": -1,
23
- "model_type": "deberta-v2",
24
- "norm_rel_ebd": "layer_norm",
25
- "num_attention_heads": 12,
26
- "num_hidden_layers": 12,
27
- "pad_token_id": 0,
28
- "pooler_dropout": 0,
29
- "pooler_hidden_act": "gelu",
30
- "pooler_hidden_size": 768,
31
- "pos_att_type": [
32
- "p2c",
33
- "c2p"
34
- ],
35
- "position_biased_input": false,
36
- "position_buckets": 256,
37
- "relative_attention": true,
38
- "share_att_key": true,
39
- "torch_dtype": "float16",
40
- "transformers_version": "4.37.2",
41
- "type_vocab_size": 0,
42
- "vocab_size": 128100
43
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/restricted-dberta-base-zeroshot-v2/model.safetensors DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:6e8f2af78c828dcbd5243aac40fb87430376f0b8a9c288f4993df3ea3558d557
3
- size 368871908
 
 
 
 
models/restricted-dberta-base-zeroshot-v2/special_tokens_map.json DELETED
@@ -1,15 +0,0 @@
1
- {
2
- "bos_token": "[CLS]",
3
- "cls_token": "[CLS]",
4
- "eos_token": "[SEP]",
5
- "mask_token": "[MASK]",
6
- "pad_token": "[PAD]",
7
- "sep_token": "[SEP]",
8
- "unk_token": {
9
- "content": "[UNK]",
10
- "lstrip": false,
11
- "normalized": true,
12
- "rstrip": false,
13
- "single_word": false
14
- }
15
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/restricted-dberta-base-zeroshot-v2/spm.model DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:c679fbf93643d19aab7ee10c0b99e460bdbc02fedf34b92b05af343b4af586fd
3
- size 2464616
 
 
 
 
models/restricted-dberta-base-zeroshot-v2/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
models/restricted-dberta-base-zeroshot-v2/tokenizer_config.json DELETED
@@ -1,58 +0,0 @@
1
- {
2
- "added_tokens_decoder": {
3
- "0": {
4
- "content": "[PAD]",
5
- "lstrip": false,
6
- "normalized": false,
7
- "rstrip": false,
8
- "single_word": false,
9
- "special": true
10
- },
11
- "1": {
12
- "content": "[CLS]",
13
- "lstrip": false,
14
- "normalized": false,
15
- "rstrip": false,
16
- "single_word": false,
17
- "special": true
18
- },
19
- "2": {
20
- "content": "[SEP]",
21
- "lstrip": false,
22
- "normalized": false,
23
- "rstrip": false,
24
- "single_word": false,
25
- "special": true
26
- },
27
- "3": {
28
- "content": "[UNK]",
29
- "lstrip": false,
30
- "normalized": true,
31
- "rstrip": false,
32
- "single_word": false,
33
- "special": true
34
- },
35
- "128000": {
36
- "content": "[MASK]",
37
- "lstrip": false,
38
- "normalized": false,
39
- "rstrip": false,
40
- "single_word": false,
41
- "special": true
42
- }
43
- },
44
- "bos_token": "[CLS]",
45
- "clean_up_tokenization_spaces": true,
46
- "cls_token": "[CLS]",
47
- "do_lower_case": false,
48
- "eos_token": "[SEP]",
49
- "mask_token": "[MASK]",
50
- "model_max_length": 512,
51
- "pad_token": "[PAD]",
52
- "sep_token": "[SEP]",
53
- "sp_model_kwargs": {},
54
- "split_by_punct": false,
55
- "tokenizer_class": "DebertaV2Tokenizer",
56
- "unk_token": "[UNK]",
57
- "vocab_type": "spm"
58
- }