{
  "best_global_step": 20,
  "best_metric": 0.3098881333544819,
  "best_model_checkpoint": "/content/gemma_lora_imb/checkpoint-20",
  "epoch": 1.253968253968254,
  "eval_steps": 20,
  "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.253968253968254,
      "grad_norm": 164.26077270507812,
      "learning_rate": 5.9375e-06,
      "loss": 9.5154,
      "step": 20
    },
    {
      "epoch": 1.253968253968254,
      "eval_f1_macro": 0.3098881333544819,
      "eval_loss": 1.8632150888442993,
      "eval_runtime": 11.091,
      "eval_samples_per_second": 45.082,
      "eval_steps_per_second": 5.68,
      "step": 20
    }
  ],
  "logging_steps": 20,
  "max_steps": 32,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 20,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 329551733809152.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
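This looks like the `trainer_state.json` that the Hugging Face `Trainer` writes next to each checkpoint, here presumably under `/content/gemma_lora_imb/checkpoint-20`. As a minimal sketch (assuming the file sits at that path and keeps the structure shown above), it can be read with Python's standard `json` module to recover the best checkpoint and replay the logged train/eval entries:

```python
import json
from pathlib import Path

# Path taken from "best_model_checkpoint" above; adjust if the checkpoint
# directory lives somewhere else.
state_path = Path("/content/gemma_lora_imb/checkpoint-20/trainer_state.json")

with state_path.open() as f:
    state = json.load(f)

# Best checkpoint so far and the metric value it was selected on
# (eval_f1_macro in this run).
print("best checkpoint:", state["best_model_checkpoint"])
print("best metric:    ", state["best_metric"])

# Walk the log history, distinguishing training entries (have "loss")
# from evaluation entries (have "eval_loss").
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"step {entry['step']}: eval_loss={entry['eval_loss']:.4f}, "
              f"eval_f1_macro={entry['eval_f1_macro']:.4f}")
    else:
        print(f"step {entry['step']}: train_loss={entry['loss']:.4f}, "
              f"grad_norm={entry['grad_norm']:.1f}")
```

For the state shown above, this would report checkpoint-20 with an eval_f1_macro of roughly 0.31, one training log entry (loss 9.5154), and one evaluation entry, matching `logging_steps`, `eval_steps`, and `save_steps` all being 20.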