zodiache committed on
Commit
5210f96
·
verified ·
1 Parent(s): b7a2183

Model save

Browse files
Files changed (3) hide show
  1. README.md +73 -0
  2. all_results.json +6 -10
  3. train_results.json +6 -6
README.md ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: llama3
3
+ library_name: peft
4
+ tags:
5
+ - trl
6
+ - sft
7
+ - generated_from_trainer
8
+ base_model: meta-llama/Meta-Llama-3-8B-Instruct
9
+ model-index:
10
+ - name: unaligned
11
+ results: []
12
+ ---
13
+
14
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
15
+ should probably proofread and complete it, then remove this comment. -->
16
+
17
+ # unaligned
18
+
19
+ This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) on the None dataset.
20
+ It achieves the following results on the evaluation set:
21
+ - Loss: 0.0226
22
+
23
+ ## Model description
24
+
25
+ More information needed
26
+
27
+ ## Intended uses & limitations
28
+
29
+ More information needed
30
+
31
+ ## Training and evaluation data
32
+
33
+ More information needed
34
+
35
+ ## Training procedure
36
+
37
+ ### Training hyperparameters
38
+
39
+ The following hyperparameters were used during training:
40
+ - learning_rate: 0.0002
41
+ - train_batch_size: 8
42
+ - eval_batch_size: 8
43
+ - seed: 42
44
+ - gradient_accumulation_steps: 8
45
+ - total_train_batch_size: 64
46
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
47
+ - lr_scheduler_type: linear
48
+ - lr_scheduler_warmup_steps: 100
49
+ - training_steps: 1024
50
+
51
+ ### Training results
52
+
53
+ | Training Loss | Epoch | Step | Validation Loss |
54
+ |:-------------:|:------:|:----:|:---------------:|
55
+ | 0.1627 | 0.1109 | 100 | 0.1143 |
56
+ | 0.0663 | 0.2218 | 200 | 0.0420 |
57
+ | 0.0557 | 0.3326 | 300 | 0.0340 |
58
+ | 0.0166 | 0.4435 | 400 | 0.0294 |
59
+ | 0.032 | 0.5544 | 500 | 0.0277 |
60
+ | 0.0463 | 0.6653 | 600 | 0.0260 |
61
+ | 0.0443 | 0.7762 | 700 | 0.0247 |
62
+ | 0.0414 | 0.8870 | 800 | 0.0239 |
63
+ | 0.0295 | 0.9979 | 900 | 0.0234 |
64
+ | 0.0266 | 1.1088 | 1000 | 0.0226 |
65
+
66
+
67
+ ### Framework versions
68
+
69
+ - PEFT 0.11.1
70
+ - Transformers 4.41.1
71
+ - Pytorch 2.3.0+cu121
72
+ - Datasets 2.19.1
73
+ - Tokenizers 0.19.1
all_results.json CHANGED
@@ -1,12 +1,8 @@
1
  {
2
- "epoch": 0.655150351887396,
3
- "eval_loss": 1.4878435134887695,
4
- "eval_runtime": 103.7194,
5
- "eval_samples_per_second": 48.207,
6
- "eval_steps_per_second": 1.514,
7
- "total_flos": 3.6685588640169984e+17,
8
- "train_loss": 1.622293347492814,
9
- "train_runtime": 4726.6516,
10
- "train_samples_per_second": 6.933,
11
- "train_steps_per_second": 0.108
12
  }
 
1
  {
2
+ "epoch": 1.1354123354123353,
3
+ "total_flos": 8.990983532408832e+17,
4
+ "train_loss": 0.273499660730522,
5
+ "train_runtime": 17033.1365,
6
+ "train_samples_per_second": 3.848,
7
+ "train_steps_per_second": 0.06
 
 
 
 
8
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
- "epoch": 0.655150351887396,
3
- "total_flos": 3.6685588640169984e+17,
4
- "train_loss": 1.622293347492814,
5
- "train_runtime": 4726.6516,
6
- "train_samples_per_second": 6.933,
7
- "train_steps_per_second": 0.108
8
  }
 
1
  {
2
+ "epoch": 1.1354123354123353,
3
+ "total_flos": 8.990983532408832e+17,
4
+ "train_loss": 0.273499660730522,
5
+ "train_runtime": 17033.1365,
6
+ "train_samples_per_second": 3.848,
7
+ "train_steps_per_second": 0.06
8
  }