End of training

- .gitattributes +1 -0
- README.md +2 -1
- all_results.json +8 -0
- train_results.json +8 -0
- trainer_state.json +56 -0
- training_loss.png +0 -0
- wandb/run-20250413_002204-33xvut2k/files/output.log +19 -0
- wandb/run-20250413_002204-33xvut2k/run-33xvut2k.wandb +0 -0
.gitattributes CHANGED
@@ -34,3 +34,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 tokenizer.json filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250413_002204-33xvut2k/run-33xvut2k.wandb filter=lfs diff=lfs merge=lfs -text
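The new rule tells Git LFS to handle the wandb run file. For illustration, a hypothetical Python equivalent of running `git lfs track` on that path; this is a sketch, not part of the commit:

```python
# Hypothetical sketch: append the new LFS rule to .gitattributes,
# equivalent to what `git lfs track <path>` would write.
from pathlib import Path

rule = (
    "wandb/run-20250413_002204-33xvut2k/run-33xvut2k.wandb "
    "filter=lfs diff=lfs merge=lfs -text\n"
)

attrs = Path(".gitattributes")
if rule not in attrs.read_text():
    with attrs.open("a") as f:
        f.write(rule)
```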
README.md CHANGED
@@ -4,6 +4,7 @@ license: apache-2.0
 base_model: Qwen/Qwen2.5-7B-Instruct
 tags:
 - llama-factory
+- full
 - generated_from_trainer
 model-index:
 - name: s1k-11-test-192
@@ -15,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # s1k-11-test-192
 
-This model is a fine-tuned version of [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) on
+This model is a fine-tuned version of [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) on the mlfoundations-dev/s1k-1.1-test-192 dataset.
 
 ## Model description
 
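The card now records the training dataset. A usage sketch using only the identifiers that appear in this diff; the fine-tuned model's own repo id is not shown here, so only the base model and the dataset are loaded:

```python
# Sketch only: loads the base model and the dataset named in the card.
# The fine-tuned repo id is not given in this diff, so it is not used here.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer

ds = load_dataset("mlfoundations-dev/s1k-1.1-test-192", split="train")

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-7B-Instruct")
```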
all_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 1.0,
+    "total_flos": 5944042269442048.0,
+    "train_loss": 1.2243931889533997,
+    "train_runtime": 384.9677,
+    "train_samples_per_second": 0.499,
+    "train_steps_per_second": 0.005
+}
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 1.0,
+    "total_flos": 5944042269442048.0,
+    "train_loss": 1.2243931889533997,
+    "train_runtime": 384.9677,
+    "train_samples_per_second": 0.499,
+    "train_steps_per_second": 0.005
+}
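all_results.json duplicates train_results.json in this commit, and the figures are mutually consistent; a quick arithmetic check with the values copied from the JSON above:

```python
# Values copied verbatim from train_results.json.
train_runtime = 384.9677      # seconds
samples_per_second = 0.499
steps_per_second = 0.005

# ~192 samples, matching the "192" in the dataset name s1k-1.1-test-192.
print(round(samples_per_second * train_runtime))  # -> 192

# 2 optimizer steps over the whole run (see trainer_state.json below).
print(round(steps_per_second * train_runtime))    # -> 2
```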
trainer_state.json ADDED
@@ -0,0 +1,56 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 2,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.5,
+      "grad_norm": 6.925258567695526,
+      "learning_rate": 2e-05,
+      "loss": 1.2459,
+      "step": 1
+    },
+    {
+      "epoch": 1.0,
+      "grad_norm": 7.060579782684043,
+      "learning_rate": 0.0,
+      "loss": 1.2028,
+      "step": 2
+    },
+    {
+      "epoch": 1.0,
+      "step": 2,
+      "total_flos": 5944042269442048.0,
+      "train_loss": 1.2243931889533997,
+      "train_runtime": 384.9677,
+      "train_samples_per_second": 0.499,
+      "train_steps_per_second": 0.005
+    }
+  ],
+  "logging_steps": 1,
+  "max_steps": 2,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 1,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 5944042269442048.0,
+  "train_batch_size": 1,
+  "trial_name": null,
+  "trial_params": null
+}
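training_loss.png (added next) is rendered from this log_history. A minimal sketch of reproducing such a plot from trainer_state.json, assuming matplotlib; LLaMA-Factory's own plotting helper (llamafactory.extras.ploting in the log below) may differ in styling:

```python
# Minimal sketch: plot training loss from trainer_state.json's log_history.
import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only per-step entries; the final summary entry has no "loss" key.
entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]

plt.plot(steps, losses, marker="o")
plt.xlabel("step")
plt.ylabel("loss")
plt.title("training loss")
plt.savefig("training_loss.png")
```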
training_loss.png ADDED
wandb/run-20250413_002204-33xvut2k/files/output.log CHANGED
@@ -52,3 +52,22 @@ Training completed. Do not forget to share your model on huggingface.co/models =
 [INFO|tokenization_utils_base.py:2655] 2025-04-13 00:35:38,673 >> Special tokens file saved in /data/horse/ws/ryma833h-DCFT_Shared/checkpoints/s1k-11-test-192/special_tokens_map.json
 [INFO|modelcard.py:449] 2025-04-13 00:35:39,180 >> Dropping the following result as it does not have all the necessary fields:
 {'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
+***** train metrics *****
+  epoch                    =        1.0
+  total_flos               =  5535820GF
+  train_loss               =     1.2244
+  train_runtime            = 0:06:24.96
+  train_samples_per_second =      0.499
+  train_steps_per_second   =      0.005
+Figure saved at: /data/horse/ws/ryma833h-DCFT_Shared/checkpoints/s1k-11-test-192/training_loss.png
+[WARNING|2025-04-13 00:35:56] llamafactory.extras.ploting:161 >> No metric eval_loss to plot.
+[WARNING|2025-04-13 00:35:56] llamafactory.extras.ploting:161 >> No metric eval_accuracy to plot.
+[INFO|trainer.py:3801] 2025-04-13 00:35:58,206 >> Saving model checkpoint to /data/horse/ws/ryma833h-DCFT_Shared/checkpoints/s1k-11-test-192
+[INFO|configuration_utils.py:414] 2025-04-13 00:35:58,211 >> Configuration saved in /data/horse/ws/ryma833h-DCFT_Shared/checkpoints/s1k-11-test-192/config.json
+[INFO|configuration_utils.py:865] 2025-04-13 00:35:58,212 >> Configuration saved in /data/horse/ws/ryma833h-DCFT_Shared/checkpoints/s1k-11-test-192/generation_config.json
+[INFO|modeling_utils.py:3043] 2025-04-13 00:36:12,746 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 4 checkpoint shards. You can find where each parameters has been saved in the index located at /data/horse/ws/ryma833h-DCFT_Shared/checkpoints/s1k-11-test-192/model.safetensors.index.json.
+[INFO|tokenization_utils_base.py:2646] 2025-04-13 00:36:12,748 >> tokenizer config file saved in /data/horse/ws/ryma833h-DCFT_Shared/checkpoints/s1k-11-test-192/tokenizer_config.json
+[INFO|tokenization_utils_base.py:2655] 2025-04-13 00:36:12,749 >> Special tokens file saved in /data/horse/ws/ryma833h-DCFT_Shared/checkpoints/s1k-11-test-192/special_tokens_map.json
+[INFO|modelcard.py:449] 2025-04-13 00:36:12,992 >> Dropping the following result as it does not have all the necessary fields:
+{'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
+run-33xvut2k.wandb: 100%|██████████| 131k/131k [00:00<00:00, 174kB/s]
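The formatted train metrics agree with the raw JSON values. Two of the conversions are less obvious; the check below assumes the GF figure is total_flos expressed in units of 2**30 FLOPs and that the runtime string truncates (rather than rounds) to centiseconds, both of which reproduce the logged strings exactly:

```python
import datetime

total_flos = 5944042269442048.0  # from train_results.json
train_runtime = 384.9677         # seconds

# 5944042269442048 / 2**30 ~= 5535820 -> matches "5535820GF" in the log.
print(f"{int(total_flos) >> 30}GF")

# 384.9677 s -> "0:06:24.96" (truncated, not rounded, to centiseconds).
centis = int((train_runtime - int(train_runtime)) * 100)
print(f"{datetime.timedelta(seconds=int(train_runtime))}.{centis:02d}")
```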
wandb/run-20250413_002204-33xvut2k/run-33xvut2k.wandb CHANGED
Binary files a/wandb/run-20250413_002204-33xvut2k/run-33xvut2k.wandb and b/wandb/run-20250413_002204-33xvut2k/run-33xvut2k.wandb differ