ninagroot committed (verified) · Commit d572c3c · 1 Parent(s): eb58b1f

ninagroot/Llama-360Mtest
README.md CHANGED
@@ -13,7 +13,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 4.9825
+- Loss: 4.3345
 
 ## Model description
 
@@ -32,7 +32,7 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 0.0003
+- learning_rate: 3e-05
 - train_batch_size: 1
 - eval_batch_size: 8
 - seed: 42
@@ -41,33 +41,28 @@ The following hyperparameters were used during training:
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_steps: 300
-- num_epochs: 20
+- num_epochs: 15
 - mixed_precision_training: Native AMP
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-| 10.7103       | 0.98  | 32   | 8.8380          |
-| 7.6149        | 1.99  | 65   | 6.7963          |
-| 6.4154        | 2.97  | 97   | 5.7262          |
-| 5.0267        | 3.98  | 130  | 5.1313          |
-| 4.387         | 5.0   | 163  | 4.9127          |
-| 4.1622        | 5.98  | 195  | 4.7890          |
-| 3.7426        | 6.99  | 228  | 4.7529          |
-| 3.483         | 8.0   | 261  | 4.7366          |
-| 3.2482        | 8.98  | 293  | 4.7420          |
-| 2.8495        | 9.99  | 326  | 4.7989          |
-| 2.7111        | 10.97 | 358  | 4.7948          |
-| 2.129         | 11.98 | 391  | 4.8183          |
-| 1.6497        | 13.0  | 424  | 4.8724          |
-| 1.3473        | 13.98 | 456  | 4.9136          |
-| 0.9066        | 14.99 | 489  | 4.9447          |
-| 0.6857        | 16.0  | 522  | 4.9582          |
-| 0.5487        | 16.98 | 554  | 4.9712          |
-| 0.3983        | 17.99 | 587  | 4.9771          |
-| 0.3503        | 18.97 | 619  | 4.9824          |
-| 0.3477        | 19.62 | 640  | 4.9825          |
+| 8.5386        | 0.99  | 34   | 8.3354          |
+| 7.896         | 2.0   | 69   | 7.4834          |
+| 6.8868        | 2.99  | 103  | 6.7625          |
+| 6.4658        | 4.0   | 138  | 6.1839          |
+| 5.8471        | 4.99  | 172  | 5.7833          |
+| 5.2893        | 6.0   | 207  | 5.2802          |
+| 4.6612        | 6.99  | 241  | 4.8722          |
+| 4.5265        | 8.0   | 276  | 4.6448          |
+| 4.093         | 8.99  | 310  | 4.5199          |
+| 3.8628        | 10.0  | 345  | 4.4178          |
+| 3.7924        | 10.99 | 379  | 4.3819          |
+| 3.5135        | 12.0  | 414  | 4.3379          |
+| 3.2653        | 12.99 | 448  | 4.3451          |
+| 3.2222        | 14.0  | 483  | 4.3393          |
+| 3.2136        | 14.78 | 510  | 4.3345          |
 
 
 ### Framework versions
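
For reference, the updated hyperparameters in the card map onto the standard transformers `TrainingArguments` roughly as sketched below. This is not the repo's actual training script: `output_dir` and `evaluation_strategy` are assumptions (per-epoch evaluation is consistent with the results table), and the Adam betas/epsilon listed in the card are the library defaults.

```python
# Minimal sketch, assuming the standard transformers Trainer API (v4.37.x).
# Only the values called out in the model card come from this commit.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="Llama-360Mtest",      # placeholder path, not from the diff
    learning_rate=3e-5,               # lowered from 3e-4 in this commit
    per_device_train_batch_size=1,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="cosine",
    warmup_steps=300,
    num_train_epochs=15,              # lowered from 20 in this commit
    fp16=True,                        # "Native AMP" mixed-precision training
    evaluation_strategy="epoch",      # assumption; matches per-epoch eval rows
)
# Adam betas=(0.9, 0.999) and epsilon=1e-08 are the TrainingArguments defaults.
```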
config.json CHANGED
@@ -15,7 +15,7 @@
   "num_attention_heads": 8,
   "num_hidden_layers": 24,
   "num_key_value_heads": 8,
-  "pad_token_id": 50256,
+  "pad_token_id": 0,
   "pretraining_tp": 1,
   "rms_norm_eps": 1e-06,
   "rope_scaling": null,
@@ -24,5 +24,5 @@
   "torch_dtype": "float32",
   "transformers_version": "4.37.2",
   "use_cache": true,
-  "vocab_size": 50257
+  "vocab_size": 4312
 }
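
The two value changes here, `pad_token_id` 50256 → 0 and `vocab_size` 50257 → 4312, are consistent with swapping a GPT-2-style tokenizer (where pad = eos = 50256) for a much smaller custom one. Below is a hedged sketch of how such a change is typically applied with the transformers API; the repo id comes from the commit header, everything else is illustrative.

```python
# Sketch only: shrink the embedding/lm_head to a new tokenizer's vocabulary
# and keep config.json in sync. Target values are taken from this diff.
from transformers import LlamaConfig, LlamaForCausalLM

config = LlamaConfig.from_pretrained("ninagroot/Llama-360Mtest")
model = LlamaForCausalLM(config)

model.resize_token_embeddings(4312)      # new vocab_size; updates config too
model.config.pad_token_id = 0            # new tokenizer's pad token id
model.save_pretrained("Llama-360Mtest")  # rewrites config.json + weights
```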
generation_config.json CHANGED
@@ -2,6 +2,6 @@
   "_from_model_config": true,
   "bos_token_id": 1,
   "eos_token_id": 2,
-  "pad_token_id": 50256,
+  "pad_token_id": 0,
   "transformers_version": "4.37.2"
 }
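
This mirrors the `pad_token_id` change in config.json. If the file was regenerated rather than hand-edited, a `GenerationConfig` round-trip would produce it; a sketch under that assumption:

```python
# Sketch: rewrite generation_config.json with the updated pad token.
from transformers import GenerationConfig

gen_config = GenerationConfig(bos_token_id=1, eos_token_id=2, pad_token_id=0)
gen_config.save_pretrained("Llama-360Mtest")  # writes generation_config.json
```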
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ef3156a8d151356cdcfd3ab09b9a293a70e7f912816fe61313a1f217edb048e1
-size 1720553864
+oid sha256:4df6aa3aecda4f15a5dfe56935f69dca228660de335ada3018f6e3224cb03fdc
+size 1344172280
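
The ~376 MB drop in checkpoint size is roughly what the vocabulary shrink predicts, assuming `hidden_size=1024` (not visible in the hunks shown) and untied float32 input-embedding and lm_head matrices:

```python
# Back-of-the-envelope check; hidden_size=1024 is an assumption inferred from
# the byte delta, not read from the diff.
removed_rows = 50257 - 4312              # vocab entries removed
hidden_size = 1024                       # assumed
bytes_per_param = 4                      # torch_dtype is float32
predicted = removed_rows * hidden_size * 2 * bytes_per_param  # embed + lm_head
observed = 1720553864 - 1344172280       # sizes from the LFS pointers above
print(predicted, observed)               # 376381440 376381584 (144-byte
                                         # remainder: safetensors header)
```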
runs/Mar20_14-59-31_gcn51.local.snellius.surf.nl/events.out.tfevents.1710943181.gcn51.local.snellius.surf.nl.948558.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e85d5762e9aee1b399f833ea513fc50b74e62b5b6541c4fbacf52392353a800d
+size 12629
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1ea73475b4a87801ca9770b73f428a8fd2309528c132f1b917b55adfe8c25dfe
+oid sha256:c18414f50a1a1d06089eb32903b401e02fe3d9906ae6e5586de44ccbeda6707a
 size 4728