ninagroot/Llama-360Mtest
README.md CHANGED
@@ -13,7 +13,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 4.
+- Loss: 4.3345
 
 ## Model description
 
@@ -32,7 +32,7 @@ More information needed
 
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate:
+- learning_rate: 3e-05
 - train_batch_size: 1
 - eval_batch_size: 8
 - seed: 42
@@ -41,33 +41,28 @@ The following hyperparameters were used during training:
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_steps: 300
-- num_epochs:
+- num_epochs: 15
 - mixed_precision_training: Native AMP
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-| 7.
-| 6.
-| 0.6857 | 16.0 | 522 | 4.9582 |
-| 0.5487 | 16.98 | 554 | 4.9712 |
-| 0.3983 | 17.99 | 587 | 4.9771 |
-| 0.3503 | 18.97 | 619 | 4.9824 |
-| 0.3477 | 19.62 | 640 | 4.9825 |
+| 8.5386 | 0.99 | 34 | 8.3354 |
+| 7.896 | 2.0 | 69 | 7.4834 |
+| 6.8868 | 2.99 | 103 | 6.7625 |
+| 6.4658 | 4.0 | 138 | 6.1839 |
+| 5.8471 | 4.99 | 172 | 5.7833 |
+| 5.2893 | 6.0 | 207 | 5.2802 |
+| 4.6612 | 6.99 | 241 | 4.8722 |
+| 4.5265 | 8.0 | 276 | 4.6448 |
+| 4.093 | 8.99 | 310 | 4.5199 |
+| 3.8628 | 10.0 | 345 | 4.4178 |
+| 3.7924 | 10.99 | 379 | 4.3819 |
+| 3.5135 | 12.0 | 414 | 4.3379 |
+| 3.2653 | 12.99 | 448 | 4.3451 |
+| 3.2222 | 14.0 | 483 | 4.3393 |
+| 3.2136 | 14.78 | 510 | 4.3345 |
 
 
 ### Framework versions
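For reference, the hyperparameter list above corresponds one-to-one to `transformers` `TrainingArguments`. A minimal sketch of the equivalent setup, assuming the standard `Trainer` API; `output_dir` and the per-epoch `evaluation_strategy` are assumptions (the latter inferred from the eval rows landing at whole epochs), not values read from the repo:

```python
# Sketch: the README hyperparameters expressed as TrainingArguments
# (transformers 4.37.x argument names).
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="Llama-360Mtest",      # hypothetical output path
    learning_rate=3e-05,
    per_device_train_batch_size=1,    # train_batch_size: 1
    per_device_eval_batch_size=8,     # eval_batch_size: 8
    seed=42,
    adam_beta1=0.9,                   # optimizer: Adam, betas=(0.9, 0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-08,
    lr_scheduler_type="cosine",
    warmup_steps=300,                 # lr_scheduler_warmup_steps: 300
    num_train_epochs=15,
    fp16=True,                        # mixed_precision_training: Native AMP
    evaluation_strategy="epoch",      # assumed: eval rows fall at epoch ends
)
```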
config.json CHANGED
@@ -15,7 +15,7 @@
 "num_attention_heads": 8,
 "num_hidden_layers": 24,
 "num_key_value_heads": 8,
-"pad_token_id":
+"pad_token_id": 0,
 "pretraining_tp": 1,
 "rms_norm_eps": 1e-06,
 "rope_scaling": null,
@@ -24,5 +24,5 @@
 "torch_dtype": "float32",
 "transformers_version": "4.37.2",
 "use_cache": true,
-"vocab_size":
+"vocab_size": 4312
 }
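With `pad_token_id` and `vocab_size` filled in, the config loads cleanly through the Auto classes. A small sketch, assuming only the public `transformers` API:

```python
# Sketch: read back the committed config and check the fields this commit set.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("ninagroot/Llama-360Mtest")
print(config.pad_token_id)       # 0 after this commit
print(config.vocab_size)         # 4312 after this commit
print(config.num_hidden_layers)  # 24
```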
generation_config.json CHANGED
@@ -2,6 +2,6 @@
 "_from_model_config": true,
 "bos_token_id": 1,
 "eos_token_id": 2,
-"pad_token_id":
+"pad_token_id": 0,
 "transformers_version": "4.37.2"
 }
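The same pad token id is mirrored into the generation defaults, which spares `generate()` the usual unset-pad-token warning. A sketch of reading it back, assuming the standard `GenerationConfig` API:

```python
# Sketch: load the generation defaults committed above.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("ninagroot/Llama-360Mtest")
assert gen_config.pad_token_id == 0  # set by this commit
assert gen_config.bos_token_id == 1
assert gen_config.eos_token_id == 2
```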
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:4df6aa3aecda4f15a5dfe56935f69dca228660de335ada3018f6e3224cb03fdc
+size 1344172280
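What git stores here is a git-lfs pointer, not the weights themselves: `oid` is the sha256 of the real file and `size` its byte count. A standard-library sketch for checking a downloaded copy against the pointer (the local filename is an assumption):

```python
# Sketch: verify a downloaded artifact against its git-lfs pointer.
import hashlib
from pathlib import Path

path = Path("model.safetensors")  # hypothetical local copy
h = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert path.stat().st_size == 1344172280  # "size" line of the pointer
assert h.hexdigest() == "4df6aa3aecda4f15a5dfe56935f69dca228660de335ada3018f6e3224cb03fdc"
```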
runs/Mar20_14-59-31_gcn51.local.snellius.surf.nl/events.out.tfevents.1710943181.gcn51.local.snellius.surf.nl.948558.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e85d5762e9aee1b399f833ea513fc50b74e62b5b6541c4fbacf52392353a800d
+size 12629
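The file added under `runs/` is a TensorBoard event log emitted during training. Once downloaded, it can be read programmatically; a sketch assuming the `tensorboard` package, with the scalar tag name an assumption (hence listing the available tags first):

```python
# Sketch: inspect the tfevents log added above.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Mar20_14-59-31_gcn51.local.snellius.surf.nl")
acc.Reload()
print(acc.Tags()["scalars"])  # discover the actual tag names first

# "train/loss" is the usual Trainer tag, but an assumption here:
for event in acc.Scalars("train/loss"):
    print(event.step, event.value)
```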
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:c18414f50a1a1d06089eb32903b401e02fe3d9906ae6e5586de44ccbeda6707a
 size 4728