error577 committed on
Commit
c5995ca
·
verified ·
1 Parent(s): 182a755

End of training

Browse files
README.md CHANGED
@@ -43,7 +43,7 @@ early_stopping_patience: null
43
  eval_max_new_tokens: 128
44
  eval_table_size: null
45
  evals_per_epoch: 1
46
- flash_attention: false
47
  fp16: null
48
  fsdp: null
49
  fsdp_config: null
@@ -66,7 +66,7 @@ lora_model_dir: null
66
  lora_r: 32
67
  lora_target_linear: true
68
  lr_scheduler: cosine
69
- max_steps: 100
70
  micro_batch_size: 1
71
  mlflow_experiment_name: /tmp/aa0fe9d87e9dc183_train_data.json
72
  model_type: AutoModelForCausalLM
@@ -103,7 +103,7 @@ xformers_attention: null
103
 
104
  This model is a fine-tuned version of [unsloth/SmolLM2-1.7B](https://huggingface.co/unsloth/SmolLM2-1.7B) on the None dataset.
105
  It achieves the following results on the evaluation set:
106
- - Loss: nan
107
 
108
  ## Model description
109
 
@@ -131,17 +131,17 @@ The following hyperparameters were used during training:
131
  - optimizer: Use OptimizerNames.ADAMW_BNB with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
132
  - lr_scheduler_type: cosine
133
  - lr_scheduler_warmup_steps: 10
134
- - training_steps: 100
135
 
136
  ### Training results
137
 
138
  | Training Loss | Epoch | Step | Validation Loss |
139
  |:-------------:|:------:|:----:|:---------------:|
140
- | 0.0 | 0.0022 | 1 | nan |
141
- | 0.0 | 0.0556 | 25 | nan |
142
- | 0.0 | 0.1112 | 50 | nan |
143
- | 0.0 | 0.1668 | 75 | nan |
144
- | 0.0 | 0.2223 | 100 | nan |
145
 
146
 
147
  ### Framework versions
 
43
  eval_max_new_tokens: 128
44
  eval_table_size: null
45
  evals_per_epoch: 1
46
+ flash_attention: true
47
  fp16: null
48
  fsdp: null
49
  fsdp_config: null
 
66
  lora_r: 32
67
  lora_target_linear: true
68
  lr_scheduler: cosine
69
+ max_steps: 500
70
  micro_batch_size: 1
71
  mlflow_experiment_name: /tmp/aa0fe9d87e9dc183_train_data.json
72
  model_type: AutoModelForCausalLM
 
103
 
104
  This model is a fine-tuned version of [unsloth/SmolLM2-1.7B](https://huggingface.co/unsloth/SmolLM2-1.7B) on the None dataset.
105
  It achieves the following results on the evaluation set:
106
+ - Loss: 0.5224
107
 
108
  ## Model description
109
 
 
131
  - optimizer: Use OptimizerNames.ADAMW_BNB with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
132
  - lr_scheduler_type: cosine
133
  - lr_scheduler_warmup_steps: 10
134
+ - training_steps: 500
135
 
136
  ### Training results
137
 
138
  | Training Loss | Epoch | Step | Validation Loss |
139
  |:-------------:|:------:|:----:|:---------------:|
140
+ | 0.8034 | 0.0022 | 1 | 1.0888 |
141
+ | 0.6931 | 0.2779 | 125 | 0.5923 |
142
+ | 0.6332 | 0.5559 | 250 | 0.5445 |
143
+ | 0.434 | 0.8338 | 375 | 0.5242 |
144
+ | 0.7231 | 1.1117 | 500 | 0.5224 |
145
 
146
 
147
  ### Framework versions
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
20
  "rank_pattern": {},
21
  "revision": null,
22
  "target_modules": [
23
- "gate_proj",
24
- "q_proj",
25
- "down_proj",
26
  "v_proj",
27
  "k_proj",
28
  "up_proj",
29
- "o_proj"
 
 
 
30
  ],
31
  "task_type": "CAUSAL_LM",
32
  "use_dora": false,
 
20
  "rank_pattern": {},
21
  "revision": null,
22
  "target_modules": [
 
 
 
23
  "v_proj",
24
  "k_proj",
25
  "up_proj",
26
+ "gate_proj",
27
+ "down_proj",
28
+ "o_proj",
29
+ "q_proj"
30
  ],
31
  "task_type": "CAUSAL_LM",
32
  "use_dora": false,
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1d264de372d328340ef7596123ecf23948a3c23730f790513a5f5641de16e333
3
  size 144824970
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:651c47017dfbb46be77fc0cb7a394673ec710f1e275af350c26549061078444e
3
  size 144824970
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3e5abffe6744b84386073a877fd5da6fcc1b21860b9f5a3576d8ab51ef6293c1
3
  size 144748392
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b9359416f21eaec293471d65c105dd24629e2301feb5c6ce8016e6259f560b1
3
  size 144748392
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f70135b798efd5508fa993194b75b9900951d74f8220895b64909dcfac37dbb8
3
  size 6776
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b4b40a5eb166bc245a53298d60b50ec76e9ebdd10373402fff1aec43def8ce1c
3
  size 6776