initial commit

- config.json +33 -0
- model.safetensors +3 -0
- rng_state.pth +3 -0
- scheduler.pt +3 -0
- special_tokens_map.json +7 -0
- tokenizer.json +0 -0
- tokenizer_config.json +55 -0
- trainer_state.json +129 -0
- training_args.bin +3 -0
- vocab.txt +0 -0
config.json
ADDED
@@ -0,0 +1,33 @@
+{
+  "_name_or_path": "distilbert-base-uncased",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "id2label": {
+    "0": "NEGATIVE",
+    "1": "POSITIVE"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "NEGATIVE": 0,
+    "POSITIVE": 1
+  },
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "pad_token_id": 0,
+  "problem_type": "single_label_classification",
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.38.2",
+  "vocab_size": 30522
+}
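This config describes a DistilBERT sequence classifier fine-tuned for binary sentiment labels (NEGATIVE/POSITIVE). A minimal inference sketch, assuming the files in this commit are available in a local directory; the path `./572project` is a guess based on `best_model_checkpoint` in trainer_state.json below, so substitute the actual repo id or directory:

```python
# Sketch: load the committed checkpoint and classify one sentence.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

MODEL_DIR = "./572project"  # assumption; use the real repo id or local path

model = AutoModelForSequenceClassification.from_pretrained(MODEL_DIR)
tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)

inputs = tokenizer("This movie was great!", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
pred = logits.argmax(dim=-1).item()
print(model.config.id2label[str(pred)] if isinstance(model.config.id2label, dict) and str(pred) in model.config.id2label else model.config.id2label[pred])  # "NEGATIVE" or "POSITIVE" per config.json
```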
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74a8f01f3f7b78723427fe79fb2c2ebc8aa084ade19febbb960f98c8539e328e
+size 267832560
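The weights are committed as a Git LFS pointer, so only the SHA-256 oid and byte size live in the repo (the same holds for rng_state.pth, scheduler.pt, and training_args.bin below). A standard-library sketch for checking that a downloaded copy matches this pointer, assuming the file sits in the working directory:

```python
# Sketch: verify a downloaded LFS file against the pointer's oid and size.
import hashlib
import os

EXPECTED_OID = "74a8f01f3f7b78723427fe79fb2c2ebc8aa084ade19febbb960f98c8539e328e"
EXPECTED_SIZE = 267832560  # bytes, from the pointer above

path = "model.safetensors"
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == EXPECTED_OID, "hash mismatch"
print("model.safetensors matches the LFS pointer")
```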
rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39f5c95754eb59de94f457708d81b81277b3f755c4f1effdb68db81f55ca2aee
+size 14244
scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95e5cda4f8fb001511d4b0efa08c53c8010179e885e66c2f1f602d767dc4ba5c
+size 1064
special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tokenizer.json
ADDED
The diff for this file is too large to render. See the raw file.
tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "DistilBertTokenizer",
+  "unk_token": "[UNK]"
+}
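This config selects `DistilBertTokenizer` with lowercasing and the standard BERT special-token ids ([PAD]=0, [UNK]=100, [CLS]=101, [SEP]=102, [MASK]=103). A quick sketch of loading and inspecting it, again assuming the hypothetical local directory from the earlier example:

```python
# Sketch: encode a sentence with the committed tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./572project")  # assumed path

enc = tok("Hello, World!")
print(enc["input_ids"])  # begins with 101 ([CLS]) and ends with 102 ([SEP])
print(tok.convert_ids_to_tokens(enc["input_ids"]))  # lower-cased, per do_lower_case
```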
trainer_state.json
ADDED
@@ -0,0 +1,129 @@
+{
+  "best_metric": 0.0444386750459671,
+  "best_model_checkpoint": "572project/checkpoint-939",
+  "epoch": 5.0,
+  "eval_steps": 500,
+  "global_step": 4695,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.53,
+      "grad_norm": 0.0010740529978647828,
+      "learning_rate": 1.7870074547390843e-05,
+      "loss": 0.029,
+      "step": 500
+    },
+    {
+      "epoch": 1.0,
+      "eval_f1": 0.983853606027987,
+      "eval_loss": 0.0444386750459671,
+      "eval_runtime": 5.0208,
+      "eval_samples_per_second": 187.023,
+      "eval_steps_per_second": 23.502,
+      "step": 939
+    },
+    {
+      "epoch": 1.06,
+      "grad_norm": 0.029427560046315193,
+      "learning_rate": 1.5740149094781685e-05,
+      "loss": 0.0461,
+      "step": 1000
+    },
+    {
+      "epoch": 1.6,
+      "grad_norm": 78.75341796875,
+      "learning_rate": 1.3610223642172523e-05,
+      "loss": 0.0114,
+      "step": 1500
+    },
+    {
+      "epoch": 2.0,
+      "eval_f1": 0.9924324324324325,
+      "eval_loss": 0.054377324879169464,
+      "eval_runtime": 5.0274,
+      "eval_samples_per_second": 186.775,
+      "eval_steps_per_second": 23.471,
+      "step": 1878
+    },
+    {
+      "epoch": 2.13,
+      "grad_norm": 0.0012783489655703306,
+      "learning_rate": 1.1480298189563365e-05,
+      "loss": 0.0062,
+      "step": 2000
+    },
+    {
+      "epoch": 2.66,
+      "grad_norm": 0.00034116956521756947,
+      "learning_rate": 9.350372736954207e-06,
+      "loss": 0.0013,
+      "step": 2500
+    },
+    {
+      "epoch": 3.0,
+      "eval_f1": 0.9934924078091107,
+      "eval_loss": 0.047857534140348434,
+      "eval_runtime": 5.0032,
+      "eval_samples_per_second": 187.679,
+      "eval_steps_per_second": 23.585,
+      "step": 2817
+    },
+    {
+      "epoch": 3.19,
+      "grad_norm": 0.00018943840404972434,
+      "learning_rate": 7.220447284345049e-06,
+      "loss": 0.0015,
+      "step": 3000
+    },
+    {
+      "epoch": 3.73,
+      "grad_norm": 0.00020874786423519254,
+      "learning_rate": 5.090521831735889e-06,
+      "loss": 0.002,
+      "step": 3500
+    },
+    {
+      "epoch": 4.0,
+      "eval_f1": 0.992399565689468,
+      "eval_loss": 0.054781731218099594,
+      "eval_runtime": 5.0055,
+      "eval_samples_per_second": 187.594,
+      "eval_steps_per_second": 23.574,
+      "step": 3756
+    },
+    {
+      "epoch": 4.26,
+      "grad_norm": 0.00014990718045737594,
+      "learning_rate": 2.9605963791267307e-06,
+      "loss": 0.002,
+      "step": 4000
+    },
+    {
+      "epoch": 4.79,
+      "grad_norm": 0.00011578563135117292,
+      "learning_rate": 8.306709265175719e-07,
+      "loss": 0.0,
+      "step": 4500
+    },
+    {
+      "epoch": 5.0,
+      "eval_f1": 0.9913419913419914,
+      "eval_loss": 0.06532972306013107,
+      "eval_runtime": 5.0316,
+      "eval_samples_per_second": 186.621,
+      "eval_steps_per_second": 23.452,
+      "step": 4695
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 4695,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "total_flos": 1871333823963648.0,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
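The log shows eval F1 above 0.99 from epoch 2 onward, yet `best_model_checkpoint` points at step 939: `best_metric` equals the eval_loss at epoch 1 (0.0444), so checkpoint selection here apparently tracked the minimum eval_loss rather than F1. A short sketch that recovers this summary from the file itself:

```python
# Sketch: summarize the evaluation history recorded in trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

evals = [e for e in state["log_history"] if "eval_loss" in e]
for e in evals:
    print(f"epoch {e['epoch']:.0f}  step {e['step']:>4}  "
          f"eval_loss {e['eval_loss']:.4f}  eval_f1 {e['eval_f1']:.4f}")

best = min(evals, key=lambda e: e["eval_loss"])
print("best by eval_loss: step", best["step"])  # 939, matching best_model_checkpoint
```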
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0be0aebfbfa66c357b055510c15be8cb65f49410f83b8eaf3b1e429f8f02eb09
+size 4856
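Unlike the weight files, training_args.bin is a small pickled `TrainingArguments` object. A hedged sketch for inspecting it: on recent PyTorch, `weights_only=False` is needed to unpickle arbitrary objects, transformers must be importable, and pickled files should only be loaded from sources you trust:

```python
# Sketch: inspect the pickled TrainingArguments saved by the Trainer.
import torch

args = torch.load("training_args.bin", weights_only=False)  # trusted file only
print(args.num_train_epochs)             # expected: 5, per trainer_state.json
print(args.per_device_train_batch_size)  # expected: 8
print(args.learning_rate)
```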
vocab.txt
ADDED
The diff for this file is too large to render. See the raw file.