End of training
Files changed:
- README.md (+53 -23)
- model.safetensors (+1 -1)
- runs/Jun27_16-49-30_viridian/events.out.tfevents.1719506973.viridian.2678733.12 (+3 -0)
- special_tokens_map.json (+22 -3)
- tokenizer_config.json (+1 -0)
- training_args.bin (+1 -1)
README.md
CHANGED
@@ -1,8 +1,8 @@
 ---
 license: apache-2.0
+base_model: distilgpt2
 tags:
 - generated_from_trainer
-base_model: distilgpt2
 model-index:
 - name: StatementOfWork_Generator_Omega2
   results: []
@@ -15,7 +15,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.
+- Loss: 0.9436
 
 ## Model description
 
@@ -40,32 +40,62 @@ The following hyperparameters were used during training:
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs:
+- num_epochs: 50
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-| No log        | 1.0   | 15   | 0.              |
-| No log        | 2.0   | 30   | 0.              |
-| No log        | 3.0   | 45   | 0.              |
-| No log        | 4.0   | 60   | 0.              |
-| No log        | 5.0   | 75   | 0.              |
-| No log        | 6.0   | 90   | 0.              |
-| No log        | 7.0   | 105  | 0.              |
-| No log        | 8.0   | 120  | 0.              |
-| No log        | 9.0   | 135  | 0.              |
-| No log        | 10.0  | 150  | 0.              |
-| No log        | 11.0  | 165  | 0.              |
-| No log        | 12.0  | 180  | 0.              |
-| No log        | 13.0  | 195  | 0.              |
-| No log        | 14.0  | 210  | 0.              |
-| No log        | 15.0  | 225  | 0.              |
-| No log        | 16.0  | 240  | 0.              |
-| No log        | 17.0  | 255  | 0.              |
-| No log        | 18.0  | 270  | 0.              |
-| No log        | 19.0  | 285  | 0.              |
-| No log        | 20.0  | 300  | 0.              |
+| No log        | 1.0   | 15   | 0.9674          |
+| No log        | 2.0   | 30   | 0.9673          |
+| No log        | 3.0   | 45   | 0.9633          |
+| No log        | 4.0   | 60   | 0.9629          |
+| No log        | 5.0   | 75   | 0.9633          |
+| No log        | 6.0   | 90   | 0.9634          |
+| No log        | 7.0   | 105  | 0.9635          |
+| No log        | 8.0   | 120  | 0.9603          |
+| No log        | 9.0   | 135  | 0.9550          |
+| No log        | 10.0  | 150  | 0.9583          |
+| No log        | 11.0  | 165  | 0.9574          |
+| No log        | 12.0  | 180  | 0.9544          |
+| No log        | 13.0  | 195  | 0.9540          |
+| No log        | 14.0  | 210  | 0.9575          |
+| No log        | 15.0  | 225  | 0.9530          |
+| No log        | 16.0  | 240  | 0.9519          |
+| No log        | 17.0  | 255  | 0.9514          |
+| No log        | 18.0  | 270  | 0.9534          |
+| No log        | 19.0  | 285  | 0.9498          |
+| No log        | 20.0  | 300  | 0.9554          |
+| No log        | 21.0  | 315  | 0.9474          |
+| No log        | 22.0  | 330  | 0.9539          |
+| No log        | 23.0  | 345  | 0.9470          |
+| No log        | 24.0  | 360  | 0.9491          |
+| No log        | 25.0  | 375  | 0.9478          |
+| No log        | 26.0  | 390  | 0.9454          |
+| No log        | 27.0  | 405  | 0.9472          |
+| No log        | 28.0  | 420  | 0.9481          |
+| No log        | 29.0  | 435  | 0.9467          |
+| No log        | 30.0  | 450  | 0.9473          |
+| No log        | 31.0  | 465  | 0.9478          |
+| No log        | 32.0  | 480  | 0.9439          |
+| No log        | 33.0  | 495  | 0.9453          |
+| 0.2954        | 34.0  | 510  | 0.9446          |
+| 0.2954        | 35.0  | 525  | 0.9453          |
+| 0.2954        | 36.0  | 540  | 0.9452          |
+| 0.2954        | 37.0  | 555  | 0.9442          |
+| 0.2954        | 38.0  | 570  | 0.9459          |
+| 0.2954        | 39.0  | 585  | 0.9442          |
+| 0.2954        | 40.0  | 600  | 0.9443          |
+| 0.2954        | 41.0  | 615  | 0.9445          |
+| 0.2954        | 42.0  | 630  | 0.9442          |
+| 0.2954        | 43.0  | 645  | 0.9441          |
+| 0.2954        | 44.0  | 660  | 0.9453          |
+| 0.2954        | 45.0  | 675  | 0.9447          |
+| 0.2954        | 46.0  | 690  | 0.9441          |
+| 0.2954        | 47.0  | 705  | 0.9438          |
+| 0.2954        | 48.0  | 720  | 0.9438          |
+| 0.2954        | 49.0  | 735  | 0.9437          |
+| 0.2954        | 50.0  | 750  | 0.9436          |
 
 
 ### Framework versions
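The updated card pins the final evaluation loss at 0.9436 after 50 epochs. For context, a minimal inference sketch for a checkpoint like this, assuming the standard transformers API; the repo id below is hypothetical, since the owner namespace does not appear in this commit:

```python
# Minimal inference sketch for the fine-tuned distilgpt2 checkpoint.
# "your-namespace/StatementOfWork_Generator_Omega2" is a hypothetical
# repo id; the owner namespace is not shown in this diff.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-namespace/StatementOfWork_Generator_Omega2"  # hypothetical
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

prompt = "Statement of Work:"  # illustrative prompt, not from the card
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=100, do_sample=True, top_p=0.9)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```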
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:30ddad8e4f69252a55ca7da39b4df696ada779576d46960546b9485e12437ae0
 size 327657928
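The binary files in this commit are Git LFS pointers: the repository stores only the `oid sha256:` digest and the byte size, while the actual blob lives in LFS storage. A minimal sketch of checking a downloaded blob against its pointer, using the digest and size above (the local path is an assumption):

```python
import hashlib

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded file against the oid/size recorded in its LFS pointer."""
    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            h.update(chunk)
            size += len(chunk)
    return size == expected_size and h.hexdigest() == expected_oid

# Values copied from the pointer above; "model.safetensors" is the local download path.
print(verify_lfs_object(
    "model.safetensors",
    "30ddad8e4f69252a55ca7da39b4df696ada779576d46960546b9485e12437ae0",
    327657928,
))
```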
runs/Jun27_16-49-30_viridian/events.out.tfevents.1719506973.viridian.2678733.12
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8809b5f238ff59eb82f27f3e020820c730e318987874cc79e47a847d1c993bc
+size 19117
special_tokens_map.json
CHANGED
@@ -1,5 +1,24 @@
 {
-  "bos_token": "<|endoftext|>",
-  "eos_token": "<|endoftext|>",
-  "unk_token": "<|endoftext|>"
+  "bos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<|endoftext|>",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
 }
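The rewrite of special_tokens_map.json reflects two things: newer tokenizer serialization expands the bos/eos/unk entries into full token objects, and the run assigned a padding token. GPT-2 ships without one, so fine-tuning scripts commonly reuse `<|endoftext|>`; a minimal sketch of how this mapping is typically produced (the training script itself is not part of this commit):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
# GPT-2 defines no pad token; reuse the end-of-text token so that
# batched training and generation can pad sequences.
tokenizer.pad_token = tokenizer.eos_token

# save_pretrained writes special_tokens_map.json and tokenizer_config.json,
# both of which then carry "pad_token": "<|endoftext|>" as in this commit.
tokenizer.save_pretrained("StatementOfWork_Generator_Omega2")
```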
tokenizer_config.json
CHANGED
@@ -14,6 +14,7 @@
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|endoftext|>",
   "model_max_length": 1024,
+  "pad_token": "<|endoftext|>",
   "tokenizer_class": "GPT2Tokenizer",
   "unk_token": "<|endoftext|>"
 }
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:679b8acb71348fa85d8ca3fef5484876c5c6a4bce535c6ca4af72c44b1688a17
 size 5048
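training_args.bin is the pickled TrainingArguments object that Trainer saves next to the weights; its new digest at an unchanged size of 5048 bytes is consistent with a changed run configuration rather than a format change. A minimal sketch of inspecting it, assuming a transformers version compatible with the one that wrote the file:

```python
import torch

# training_args.bin is a pickled transformers.TrainingArguments object;
# unpickling requires a compatible transformers install to be importable.
args = torch.load("training_args.bin", weights_only=False)

print(args.num_train_epochs)   # 50, per the updated model card
print(args.seed)               # 42
print(args.lr_scheduler_type)  # linear
```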