nikchar committed on
Commit 867c727 · verified · 1 Parent(s): e3b4e62

my-awesome-model

Files changed (7)
  1. README.md +13 -25
  2. config.json +20 -17
  3. model.safetensors +3 -0
  4. special_tokens_map.json +5 -49
  5. tokenizer_config.json +53 -58
  6. training_args.bin +2 -2
  7. vocab.txt +0 -0
README.md CHANGED
@@ -1,10 +1,9 @@
 ---
-license: mit
-base_model: roberta-base
+library_name: transformers
+license: apache-2.0
+base_model: bert-base-uncased
 tags:
 - generated_from_trainer
-metrics:
-- accuracy
 model-index:
 - name: big_model
   results: []
@@ -15,10 +14,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # big_model
 
-This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the None dataset.
-It achieves the following results on the evaluation set:
-- Loss: 0.2524
-- Accuracy: 0.9432
+This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the None dataset.
 
 ## Model description
 
@@ -37,25 +33,17 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 2e-05
-- train_batch_size: 8
-- eval_batch_size: 8
+- learning_rate: 5e-05
+- train_batch_size: 32
+- eval_batch_size: 32
 - seed: 42
-- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: linear
-- num_epochs: 2
-
-### Training results
-
-| Training Loss | Epoch | Step | Validation Loss | Accuracy |
-|:-------------:|:-----:|:----:|:---------------:|:--------:|
-| 0.2525        | 1.0   | 3976 | 0.2490          | 0.9381   |
-| 0.1921        | 2.0   | 7952 | 0.2524          | 0.9432   |
-
+- num_epochs: 3
 
 ### Framework versions
 
-- Transformers 4.33.2
-- Pytorch 2.0.1+cu118
-- Datasets 2.14.5
-- Tokenizers 0.13.3
+- Transformers 4.47.1
+- Pytorch 2.5.1+cu121
+- Datasets 3.2.0
+- Tokenizers 0.21.0
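For context, the updated card's hyperparameters correspond to a standard transformers Trainer setup. A minimal sketch follows, assuming a 5-label head (per the new config.json below); the output path and dataset are hypothetical, since the card lists the dataset as None:

```python
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

# Base checkpoint named in the card; num_labels=5 matches the id2label
# map in the updated config.json.
model = AutoModelForSequenceClassification.from_pretrained(
    "bert-base-uncased", num_labels=5
)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

# Values below mirror the updated card. betas=(0.9, 0.999) and
# epsilon=1e-08 are the adamw_torch defaults, so no extra arguments
# are needed for them.
args = TrainingArguments(
    output_dir="big_model",          # hypothetical output path
    learning_rate=5e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    seed=42,
    optim="adamw_torch",             # OptimizerNames.ADAMW_TORCH
    lr_scheduler_type="linear",
    num_train_epochs=3,
)

# The card does not name a dataset, so training is left abstract here:
# trainer = Trainer(model=model, args=args,
#                   train_dataset=..., eval_dataset=...)
# trainer.train()
```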
config.json CHANGED
@@ -1,38 +1,41 @@
 {
-  "_name_or_path": "roberta-base",
+  "_name_or_path": "bert-base-uncased",
   "architectures": [
-    "RobertaForSequenceClassification"
+    "BertForSequenceClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
-  "bos_token_id": 0,
   "classifier_dropout": null,
-  "eos_token_id": 2,
+  "gradient_checkpointing": false,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
   "id2label": {
-    "0": "NOT ENOUGH INFO",
-    "1": "REFUTES",
-    "2": "SUPPORTS"
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2",
+    "3": "LABEL_3",
+    "4": "LABEL_4"
   },
   "initializer_range": 0.02,
   "intermediate_size": 3072,
   "label2id": {
-    "NOT ENOUGH INFO": 0,
-    "REFUTES": 1,
-    "SUPPORTS": 2
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_2": 2,
+    "LABEL_3": 3,
+    "LABEL_4": 4
   },
-  "layer_norm_eps": 1e-05,
-  "max_position_embeddings": 514,
-  "model_type": "roberta",
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
-  "pad_token_id": 1,
+  "pad_token_id": 0,
   "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
   "torch_dtype": "float32",
-  "transformers_version": "4.33.2",
-  "type_vocab_size": 1,
+  "transformers_version": "4.47.1",
+  "type_vocab_size": 2,
   "use_cache": true,
-  "vocab_size": 50265
+  "vocab_size": 30522
 }
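The new config switches the head to 5-way BERT sequence classification with generic LABEL_n names. At inference, transformers resolves the argmax index through this id2label map; a small sketch, assuming the hypothetical repo id nikchar/my-awesome-model:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo = "nikchar/my-awesome-model"  # hypothetical repo id for this commit
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(repo)

inputs = tokenizer("An example sentence to classify.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# config.json's id2label turns the argmax index into a label name.
pred_id = logits.argmax(dim=-1).item()
print(model.config.id2label[pred_id])  # e.g. "LABEL_3"
```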
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8dfb91a552e3abdbb7c343cf1ab7d535fb54579d65def29ffc94ab37cbc59952
+size 437967876
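What git stores here is only the Git LFS pointer (spec version, sha256 oid, byte size); the ~438 MB weight file itself lives in LFS storage. A sketch of fetching the resolved file with huggingface_hub, again with a hypothetical repo id:

```python
from huggingface_hub import hf_hub_download

# hf_hub_download resolves the LFS pointer and returns a local path to
# the actual weights, cached under ~/.cache/huggingface by default.
path = hf_hub_download(
    repo_id="nikchar/my-awesome-model",  # hypothetical repo id
    filename="model.safetensors",
)
print(path)
```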
special_tokens_map.json CHANGED
@@ -1,51 +1,7 @@
 {
-  "bos_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "cls_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "mask_token": {
-    "content": "<mask>",
-    "lstrip": true,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<pad>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "sep_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
 }
tokenizer_config.json CHANGED
@@ -1,63 +1,58 @@
 {
-  "add_prefix_space": false,
-  "bos_token": {
-    "__type": "AddedToken",
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
   },
   "clean_up_tokenization_spaces": true,
-  "cls_token": {
-    "__type": "AddedToken",
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "__type": "AddedToken",
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "errors": "replace",
-  "mask_token": {
-    "__type": "AddedToken",
-    "content": "<mask>",
-    "lstrip": true,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "extra_special_tokens": {},
+  "mask_token": "[MASK]",
   "model_max_length": 512,
-  "pad_token": {
-    "__type": "AddedToken",
-    "content": "<pad>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "sep_token": {
-    "__type": "AddedToken",
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "tokenizer_class": "RobertaTokenizer",
-  "unk_token": {
-    "__type": "AddedToken",
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
 }
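The new added_tokens_decoder pins the BERT special tokens to their standard WordPiece ids (0, 100, 101, 102, 103 for [PAD], [UNK], [CLS], [SEP], [MASK]). A quick sanity check against the stock bert-base-uncased vocabulary, which this repo's vocab.txt should match given the shared 30522-entry vocab:

```python
from transformers import AutoTokenizer

# bert-base-uncased ships the same WordPiece vocab the new config declares.
tok = AutoTokenizer.from_pretrained("bert-base-uncased")
for t in ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]:
    print(t, tok.convert_tokens_to_ids(t))
# Expected ids: 0, 100, 101, 102, 103 (matching added_tokens_decoder).
```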
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c1641a9580d51dd37763cba40f2f8010d04230133b8306ae4a018b58d01a729f
-size 4027
+oid sha256:785b405c79436261f173aa212c49c128b4078b80b4a2c0761d01be6afbc17c1c
+size 5304
vocab.txt ADDED
The diff for this file is too large to render. See raw diff