{
  "best_global_step": 120,
  "best_metric": 0.776326044440457,
  "best_model_checkpoint": "/content/gemma_lora_imb/checkpoint-120",
  "epoch": 1.9047619047619047,
  "eval_steps": 20,
  "global_step": 120,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.31746031746031744,
      "grad_norm": 11.7665376663208,
      "learning_rate": 8.492063492063492e-06,
      "loss": 0.3174,
      "step": 20
    },
    {
      "epoch": 0.31746031746031744,
      "eval_f1_macro": 0.6737641320228203,
      "eval_loss": 0.928208589553833,
      "eval_runtime": 38.1398,
      "eval_samples_per_second": 52.439,
      "eval_steps_per_second": 1.652,
      "step": 20
    },
    {
      "epoch": 0.6349206349206349,
      "grad_norm": 4.18451452255249,
      "learning_rate": 6.9047619047619055e-06,
      "loss": 0.3193,
      "step": 40
    },
    {
      "epoch": 0.6349206349206349,
      "eval_f1_macro": 0.7107737640945189,
      "eval_loss": 0.6456462144851685,
      "eval_runtime": 38.1401,
      "eval_samples_per_second": 52.438,
      "eval_steps_per_second": 1.652,
      "step": 40
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 32.706443786621094,
      "learning_rate": 5.317460317460318e-06,
      "loss": 0.2606,
      "step": 60
    },
    {
      "epoch": 0.9523809523809523,
      "eval_f1_macro": 0.7456057615525826,
      "eval_loss": 0.8317409157752991,
      "eval_runtime": 38.1406,
      "eval_samples_per_second": 52.438,
      "eval_steps_per_second": 1.652,
      "step": 60
    },
    {
      "epoch": 1.2698412698412698,
      "grad_norm": 4.3210954666137695,
      "learning_rate": 3.7301587301587305e-06,
      "loss": 0.1377,
      "step": 80
    },
    {
      "epoch": 1.2698412698412698,
      "eval_f1_macro": 0.7713330490242807,
      "eval_loss": 0.807384192943573,
      "eval_runtime": 38.1576,
      "eval_samples_per_second": 52.414,
      "eval_steps_per_second": 1.651,
      "step": 80
    },
    {
      "epoch": 1.5873015873015874,
      "grad_norm": 5.902767658233643,
      "learning_rate": 2.1428571428571427e-06,
      "loss": 0.1266,
      "step": 100
    },
    {
      "epoch": 1.5873015873015874,
      "eval_f1_macro": 0.7761022946562621,
      "eval_loss": 0.8588651418685913,
      "eval_runtime": 38.1597,
      "eval_samples_per_second": 52.411,
      "eval_steps_per_second": 1.651,
      "step": 100
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 4.348827838897705,
      "learning_rate": 5.555555555555555e-07,
      "loss": 0.1091,
      "step": 120
    },
    {
      "epoch": 1.9047619047619047,
      "eval_f1_macro": 0.776326044440457,
      "eval_loss": 0.8794072270393372,
      "eval_runtime": 38.1823,
      "eval_samples_per_second": 52.38,
      "eval_steps_per_second": 1.65,
      "step": 120
    }
  ],
  "logging_steps": 20,
  "max_steps": 126,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 20,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.4825056756023296e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}