{
  "best_global_step": 100,
  "best_metric": 1.198757290840149,
  "best_model_checkpoint": "/home/ricoiban/GEMMA/mnlp_chatsplaining/safety_model_output/checkpoint-100",
  "epoch": 2.0414507772020727,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.20725388601036268,
      "grad_norm": 21.424072265625,
      "learning_rate": 0.00019666666666666666,
      "loss": 11.0272,
      "mean_token_accuracy": 0.296875,
      "num_tokens": 21984.0,
      "step": 10
    },
    {
      "epoch": 0.41450777202072536,
      "grad_norm": 3.1715638637542725,
      "learning_rate": 0.00018833333333333335,
      "loss": 0.7306,
      "mean_token_accuracy": 0.63125,
      "num_tokens": 44111.0,
      "step": 20
    },
    {
      "epoch": 0.6217616580310881,
      "grad_norm": 6.497854232788086,
      "learning_rate": 0.00018,
      "loss": 0.6675,
      "mean_token_accuracy": 0.684375,
      "num_tokens": 68120.0,
      "step": 30
    },
    {
      "epoch": 0.8290155440414507,
      "grad_norm": 2.4520726203918457,
      "learning_rate": 0.00017166666666666667,
      "loss": 0.6535,
      "mean_token_accuracy": 0.68125,
      "num_tokens": 89669.0,
      "step": 40
    },
    {
      "epoch": 1.0207253886010363,
      "grad_norm": 4.587820529937744,
      "learning_rate": 0.00016333333333333334,
      "loss": 0.6026,
      "mean_token_accuracy": 0.7128378378378378,
      "num_tokens": 111297.0,
      "step": 50
    },
    {
      "epoch": 1.0207253886010363,
      "eval_loss": 1.4000602960586548,
      "eval_mean_token_accuracy": 0.5,
      "eval_num_tokens": 111297.0,
      "eval_runtime": 0.1262,
      "eval_samples_per_second": 7.921,
      "eval_steps_per_second": 7.921,
      "step": 50
    },
    {
      "epoch": 1.2279792746113989,
      "grad_norm": 2.7760419845581055,
      "learning_rate": 0.000155,
      "loss": 0.4794,
      "mean_token_accuracy": 0.815625,
      "num_tokens": 133338.0,
      "step": 60
    },
    {
      "epoch": 1.4352331606217616,
      "grad_norm": 7.210590362548828,
      "learning_rate": 0.00014666666666666666,
      "loss": 0.4987,
      "mean_token_accuracy": 0.79375,
      "num_tokens": 156589.0,
      "step": 70
    },
    {
      "epoch": 1.6424870466321244,
      "grad_norm": 3.8868515491485596,
      "learning_rate": 0.00013833333333333333,
      "loss": 0.4606,
      "mean_token_accuracy": 0.7875,
      "num_tokens": 178995.0,
      "step": 80
    },
    {
      "epoch": 1.849740932642487,
      "grad_norm": 2.4217886924743652,
      "learning_rate": 0.00013000000000000002,
      "loss": 0.5303,
      "mean_token_accuracy": 0.771875,
      "num_tokens": 203491.0,
      "step": 90
    },
    {
      "epoch": 2.0414507772020727,
      "grad_norm": 3.1198058128356934,
      "learning_rate": 0.00012166666666666667,
      "loss": 0.4854,
      "mean_token_accuracy": 0.8108108108108109,
      "num_tokens": 223362.0,
      "step": 100
    },
    {
      "epoch": 2.0414507772020727,
      "eval_loss": 1.198757290840149,
      "eval_mean_token_accuracy": 0.5,
      "eval_num_tokens": 223362.0,
      "eval_runtime": 0.1833,
      "eval_samples_per_second": 5.455,
      "eval_steps_per_second": 5.455,
      "step": 100
    }
  ],
  "logging_steps": 10,
  "max_steps": 245,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 852188282880000.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}