{ "best_global_step": 2000, "best_metric": 0.782435168791527, "best_model_checkpoint": "./results/run-0/checkpoint-2000", "epoch": 1.0, "eval_steps": 500, "global_step": 2000, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.05, "grad_norm": 2.039576768875122, "learning_rate": 1.447537100215313e-06, "loss": 0.0, "step": 100 }, { "epoch": 0.1, "grad_norm": 4.460484504699707, "learning_rate": 2.9096957873014878e-06, "loss": 0.0821, "step": 200 }, { "epoch": 0.15, "grad_norm": 26.84711456298828, "learning_rate": 4.371854474387663e-06, "loss": 0.0519, "step": 300 }, { "epoch": 0.2, "grad_norm": 0.6646966934204102, "learning_rate": 5.834013161473838e-06, "loss": 0.0, "step": 400 }, { "epoch": 0.25, "grad_norm": 1.8385223150253296, "learning_rate": 7.296171848560012e-06, "loss": 0.0, "step": 500 }, { "epoch": 0.3, "grad_norm": 0.9234638810157776, "learning_rate": 8.758330535646188e-06, "loss": 0.0, "step": 600 }, { "epoch": 0.35, "grad_norm": 38.24382400512695, "learning_rate": 9.9933989317167e-06, "loss": 0.0, "step": 700 }, { "epoch": 0.4, "grad_norm": 42.83884811401367, "learning_rate": 9.94162234536513e-06, "loss": 0.0, "step": 800 }, { "epoch": 0.45, "grad_norm": 0.8020399808883667, "learning_rate": 9.889845759013559e-06, "loss": 0.0, "step": 900 }, { "epoch": 0.5, "grad_norm": 0.86091148853302, "learning_rate": 9.838069172661988e-06, "loss": 0.0, "step": 1000 }, { "epoch": 0.55, "grad_norm": 0.16439934074878693, "learning_rate": 9.786292586310415e-06, "loss": 0.0, "step": 1100 }, { "epoch": 0.6, "grad_norm": 2.752115488052368, "learning_rate": 9.734515999958846e-06, "loss": 0.0, "step": 1200 }, { "epoch": 0.65, "grad_norm": 1.5777802467346191, "learning_rate": 9.682739413607275e-06, "loss": 0.0, "step": 1300 }, { "epoch": 0.7, "grad_norm": 5.708035945892334, "learning_rate": 9.630962827255703e-06, "loss": 0.0598, "step": 1400 }, { "epoch": 0.75, "grad_norm": 0.3160113990306854, 
"learning_rate": 9.579186240904132e-06, "loss": 0.0441, "step": 1500 }, { "epoch": 0.8, "grad_norm": 1.2273201942443848, "learning_rate": 9.527409654552563e-06, "loss": 0.0, "step": 1600 }, { "epoch": 0.85, "grad_norm": 29.215551376342773, "learning_rate": 9.47563306820099e-06, "loss": 0.0, "step": 1700 }, { "epoch": 0.9, "grad_norm": 0.18806450068950653, "learning_rate": 9.42385648184942e-06, "loss": 0.0, "step": 1800 }, { "epoch": 0.95, "grad_norm": 0.41723617911338806, "learning_rate": 9.372079895497849e-06, "loss": 0.0183, "step": 1900 }, { "epoch": 1.0, "grad_norm": 1.9550623893737793, "learning_rate": 9.32030330914628e-06, "loss": 0.0, "step": 2000 }, { "epoch": 1.0, "eval_economic_inequality_accuracy": 0.8047858942065491, "eval_economic_inequality_f1": 0.7536914695213144, "eval_economic_policy_benefits_accuracy": 0.8282828282828283, "eval_economic_policy_benefits_f1": 0.7567090529234769, "eval_ethnic_boundaries_accuracy": 0.9526143790849673, "eval_ethnic_boundaries_f1": 0.9344984933861545, "eval_language_policy_accuracy": 0.7058823529411765, "eval_language_policy_f1": 0.6071428571428571, "eval_loss": null, "eval_mother_tongue_education_accuracy": 0.8888888888888888, "eval_mother_tongue_education_f1": 0.837037037037037, "eval_overall_accuracy": 0.8392311075242039, "eval_overall_f1": 0.782435168791527, "eval_religion_ethnic_identity_accuracy": 0.8549323017408124, "eval_religion_ethnic_identity_f1": 0.8055321027383222, "eval_runtime": 4.4874, "eval_samples_per_second": 891.378, "eval_steps_per_second": 55.711, "step": 2000 } ], "logging_steps": 100, "max_steps": 20000, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 500, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 2, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, 
"should_training_stop": false }, "attributes": {} } }, "total_flos": 4210381651968000.0, "train_batch_size": 8, "trial_name": null, "trial_params": { "gradient_accumulation_steps": 1, "learning_rate": 1.0001165419669436e-05, "num_train_epochs": 10, "per_device_train_batch_size": 8, "warmup_steps": 684, "weight_decay": 0.07522641729243515 } }