{
  "best_global_step": 1500,
  "best_metric": 0.7817611579593701,
  "best_model_checkpoint": "./results/run-3/checkpoint-1500",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 1500,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2,
      "grad_norm": 0.3840266466140747,
      "learning_rate": 5.623773985869002e-05,
      "loss": 0.4015,
      "step": 100
    },
    {
      "epoch": 0.4,
      "grad_norm": 36.44963455200195,
      "learning_rate": 6.805455705058522e-05,
      "loss": 0.0,
      "step": 200
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.6683640480041504,
      "learning_rate": 6.718217457333122e-05,
      "loss": 0.0,
      "step": 300
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.7861915230751038,
      "learning_rate": 6.630979209607721e-05,
      "loss": 0.1644,
      "step": 400
    },
    {
      "epoch": 1.0,
      "grad_norm": 2.053349256515503,
      "learning_rate": 6.54374096188232e-05,
      "loss": 0.0025,
      "step": 500
    },
    {
      "epoch": 1.0,
      "eval_economic_inequality_accuracy": 0.7770780856423174,
      "eval_economic_inequality_f1": 0.6795991195482776,
      "eval_economic_policy_benefits_accuracy": 0.8215488215488216,
      "eval_economic_policy_benefits_f1": 0.7410643713786043,
      "eval_ethnic_boundaries_accuracy": 0.946078431372549,
      "eval_ethnic_boundaries_f1": 0.9198646713093299,
      "eval_language_policy_accuracy": 0.6764705882352942,
      "eval_language_policy_f1": 0.545923632610939,
      "eval_loss": null,
      "eval_mother_tongue_education_accuracy": 0.7777777777777778,
      "eval_mother_tongue_education_f1": 0.6805555555555556,
      "eval_overall_accuracy": 0.8064020197505432,
      "eval_overall_f1": 0.7222001215424422,
      "eval_religion_ethnic_identity_accuracy": 0.839458413926499,
      "eval_religion_ethnic_identity_f1": 0.7661933788519465,
      "eval_runtime": 4.4769,
      "eval_samples_per_second": 893.468,
      "eval_steps_per_second": 55.842,
      "step": 500
    },
    {
      "epoch": 1.2,
      "grad_norm": 4.278098106384277,
      "learning_rate": 6.456502714156919e-05,
      "loss": 0.003,
      "step": 600
    },
    {
      "epoch": 1.4,
      "grad_norm": 2.1036500930786133,
      "learning_rate": 6.369264466431519e-05,
      "loss": 0.0604,
      "step": 700
    },
    {
      "epoch": 1.6,
      "grad_norm": 1.2531561851501465,
      "learning_rate": 6.282026218706118e-05,
      "loss": 0.0,
      "step": 800
    },
    {
      "epoch": 1.8,
      "grad_norm": 1.4897408485412598,
      "learning_rate": 6.194787970980717e-05,
      "loss": 0.0114,
      "step": 900
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.9127349853515625,
      "learning_rate": 6.107549723255315e-05,
      "loss": 25991.0925,
      "step": 1000
    },
    {
      "epoch": 2.0,
      "eval_economic_inequality_accuracy": 0.8224181360201511,
      "eval_economic_inequality_f1": 0.7973444391161673,
      "eval_economic_policy_benefits_accuracy": 0.8215488215488216,
      "eval_economic_policy_benefits_f1": 0.7410643713786043,
      "eval_ethnic_boundaries_accuracy": 0.9330065359477124,
      "eval_ethnic_boundaries_f1": 0.9306378882302275,
      "eval_language_policy_accuracy": 0.6764705882352942,
      "eval_language_policy_f1": 0.545923632610939,
      "eval_loss": null,
      "eval_mother_tongue_education_accuracy": 0.7777777777777778,
      "eval_mother_tongue_education_f1": 0.6805555555555556,
      "eval_overall_accuracy": 0.8117800455760427,
      "eval_overall_f1": 0.7478535187845178,
      "eval_religion_ethnic_identity_accuracy": 0.839458413926499,
      "eval_religion_ethnic_identity_f1": 0.7915952258156131,
      "eval_runtime": 4.4757,
      "eval_samples_per_second": 893.725,
      "eval_steps_per_second": 55.858,
      "step": 1000
    },
    {
      "epoch": 2.2,
      "grad_norm": 0.9552398324012756,
      "learning_rate": 6.020311475529915e-05,
      "loss": 0.0,
      "step": 1100
    },
    {
      "epoch": 2.4,
      "grad_norm": 1.114319920539856,
      "learning_rate": 5.9330732278045136e-05,
      "loss": 0.0009,
      "step": 1200
    },
    {
      "epoch": 2.6,
      "grad_norm": 0.16842737793922424,
      "learning_rate": 5.845834980079113e-05,
      "loss": 0.0,
      "step": 1300
    },
    {
      "epoch": 2.8,
      "grad_norm": 1.0355138778686523,
      "learning_rate": 5.758596732353712e-05,
      "loss": 0.0638,
      "step": 1400
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.5731643438339233,
      "learning_rate": 5.6713584846283115e-05,
      "loss": 0.0,
      "step": 1500
    },
    {
      "epoch": 3.0,
      "eval_economic_inequality_accuracy": 0.8224181360201511,
      "eval_economic_inequality_f1": 0.7938100421450593,
      "eval_economic_policy_benefits_accuracy": 0.8383838383838383,
      "eval_economic_policy_benefits_f1": 0.8108014554114583,
      "eval_ethnic_boundaries_accuracy": 0.9395424836601307,
      "eval_ethnic_boundaries_f1": 0.9340440546398507,
      "eval_language_policy_accuracy": 0.6764705882352942,
      "eval_language_policy_f1": 0.6109373423468875,
      "eval_loss": null,
      "eval_mother_tongue_education_accuracy": 0.7777777777777778,
      "eval_mother_tongue_education_f1": 0.7407407407407408,
      "eval_overall_accuracy": 0.8150304610083521,
      "eval_overall_f1": 0.7817611579593701,
      "eval_religion_ethnic_identity_accuracy": 0.8355899419729207,
      "eval_religion_ethnic_identity_f1": 0.8002333124722241,
      "eval_runtime": 4.4858,
      "eval_samples_per_second": 891.71,
      "eval_steps_per_second": 55.732,
      "step": 1500
    }
  ],
  "logging_steps": 100,
  "max_steps": 8000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 16,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 2,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2631144955904e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": {
    "gradient_accumulation_steps": 4,
    "learning_rate": 6.873501538284335e-05,
    "num_train_epochs": 16,
    "per_device_train_batch_size": 8,
    "warmup_steps": 121,
    "weight_decay": 0.08748775333302956
  }
}