Den4ikAI committed
Commit 7b35477
1 Parent(s): 6ae880f

Upload 6 files

Files changed (5)
  1. config.json +8 -13
  2. pytorch_model.bin +2 -2
  3. tokenizer.json +0 -0
  4. tokenizer_config.json +3 -2
  5. vocab.txt +0 -0
config.json CHANGED
@@ -1,33 +1,28 @@
 {
-  "_name_or_path": "/media/denis/d9b5b2ab-3dc3-4627-9f99-1ed59b84b83e/models/rubert-cased-conversational-deeppavlov",
+  "_name_or_path": "/media/denis/d9b5b2ab-3dc3-4627-9f99-1ed59b84b83e/models/ruBert-tiny2",
   "architectures": [
     "BertForSequenceClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
   "classifier_dropout": null,
-  "directionality": "bidi",
+  "emb_size": 312,
+  "gradient_checkpointing": false,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
-  "hidden_size": 768,
+  "hidden_size": 312,
   "initializer_range": 0.02,
-  "intermediate_size": 3072,
+  "intermediate_size": 600,
   "layer_norm_eps": 1e-12,
-  "max_position_embeddings": 512,
+  "max_position_embeddings": 2048,
   "model_type": "bert",
   "num_attention_heads": 12,
-  "num_hidden_layers": 12,
-  "output_past": true,
+  "num_hidden_layers": 3,
   "pad_token_id": 0,
-  "pooler_fc_size": 768,
-  "pooler_num_attention_heads": 12,
-  "pooler_num_fc_layers": 3,
-  "pooler_size_per_head": 128,
-  "pooler_type": "first_token_transform",
   "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
   "torch_dtype": "float32",
   "transformers_version": "4.23.1",
   "type_vocab_size": 2,
   "use_cache": true,
-  "vocab_size": 119547
+  "vocab_size": 83828
 }
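This commit swaps the classifier's backbone from the 12-layer DeepPavlov rubert-cased-conversational checkpoint (hidden size 768, vocab 119547, 512-token context) to the much smaller ruBert-tiny2 (3 layers, hidden size 312, vocab 83828, 2048-token context). A minimal sketch for sanity-checking the new config with the transformers library, assuming the commit's files have been downloaded into the current working directory:

# Sketch: load and verify the updated config.json (assumes the files
# from this commit sit in the current working directory).
from transformers import AutoConfig

config = AutoConfig.from_pretrained(".")       # reads ./config.json
assert config.num_hidden_layers == 3           # was 12 before this commit
assert config.hidden_size == 312               # was 768
assert config.max_position_embeddings == 2048  # was 512
assert config.vocab_size == 83828              # was 119547
print(config.architectures)                    # ['BertForSequenceClassification']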
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ebe970ba897ae346c0d91d533e4927f1bd67c4e333562d51e52fa0a68e62118e
-size 711492725
+oid sha256:7ff2ec0dcf8d3f6100de577735572b793314762306ab1e6ff04bc6de8e381f0d
+size 116812255
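The checkpoint itself is stored with Git LFS, so only the pointer file changed; the weights shrink from roughly 711 MB to about 117 MB, consistent with the tiny backbone. A sketch for verifying a downloaded pytorch_model.bin against the new pointer, with the expected values copied from the diff above:

# Sketch: check a downloaded checkpoint against the Git LFS pointer.
import hashlib

EXPECTED_OID = "7ff2ec0dcf8d3f6100de577735572b793314762306ab1e6ff04bc6de8e381f0d"
EXPECTED_SIZE = 116812255  # bytes, from the new pointer

digest = hashlib.sha256()
size = 0
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        digest.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size} != {EXPECTED_SIZE}"
assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")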
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -3,11 +3,12 @@
   "do_basic_tokenize": true,
   "do_lower_case": false,
   "mask_token": "[MASK]",
-  "name_or_path": "/media/denis/d9b5b2ab-3dc3-4627-9f99-1ed59b84b83e/models/rubert-cased-conversational-deeppavlov",
+  "model_max_length": 2048,
+  "name_or_path": "/media/denis/d9b5b2ab-3dc3-4627-9f99-1ed59b84b83e/models/ruBert-tiny2",
   "never_split": null,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "special_tokens_map_file": "/media/denis/d9b5b2ab-3dc3-4627-9f99-1ed59b84b83e/models/rubert-cased-conversational-deeppavlov/special_tokens_map.json",
+  "special_tokens_map_file": null,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff