{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 327,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09216589861751152,
      "grad_norm": 5.88378562614363,
      "learning_rate": 2.7272727272727272e-06,
      "loss": 1.4101,
      "step": 10
    },
    {
      "epoch": 0.18433179723502305,
      "grad_norm": 4.0683772089528185,
      "learning_rate": 5.7575757575757586e-06,
      "loss": 1.1185,
      "step": 20
    },
    {
      "epoch": 0.2764976958525346,
      "grad_norm": 3.2759130835346952,
      "learning_rate": 8.787878787878788e-06,
      "loss": 0.9879,
      "step": 30
    },
    {
      "epoch": 0.3686635944700461,
      "grad_norm": 2.464724864079934,
      "learning_rate": 9.989726963751683e-06,
      "loss": 0.9177,
      "step": 40
    },
    {
      "epoch": 0.4608294930875576,
      "grad_norm": 2.865448887929804,
      "learning_rate": 9.927100106776213e-06,
      "loss": 0.9148,
      "step": 50
    },
    {
      "epoch": 0.5529953917050692,
      "grad_norm": 2.8831532266505877,
      "learning_rate": 9.808267184205182e-06,
      "loss": 0.8591,
      "step": 60
    },
    {
      "epoch": 0.6451612903225806,
      "grad_norm": 2.9379163177927907,
      "learning_rate": 9.63458378673011e-06,
      "loss": 0.8226,
      "step": 70
    },
    {
      "epoch": 0.7373271889400922,
      "grad_norm": 2.5132229889640483,
      "learning_rate": 9.408031213740045e-06,
      "loss": 0.8443,
      "step": 80
    },
    {
      "epoch": 0.8294930875576036,
      "grad_norm": 2.723987078419883,
      "learning_rate": 9.131193871579975e-06,
      "loss": 0.8669,
      "step": 90
    },
    {
      "epoch": 0.9216589861751152,
      "grad_norm": 2.6570224472803163,
      "learning_rate": 8.807229791845673e-06,
      "loss": 0.8335,
      "step": 100
    },
    {
      "epoch": 1.0092165898617511,
      "grad_norm": 2.3062706535536113,
      "learning_rate": 8.439834606028594e-06,
      "loss": 0.7944,
      "step": 110
    },
    {
      "epoch": 1.1013824884792627,
      "grad_norm": 2.67202891200167,
      "learning_rate": 8.033199387471278e-06,
      "loss": 0.6197,
      "step": 120
    },
    {
      "epoch": 1.1935483870967742,
      "grad_norm": 2.6402741952118784,
      "learning_rate": 7.591962841552627e-06,
      "loss": 0.6816,
      "step": 130
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 2.4815187634439457,
      "learning_rate": 7.121158389495187e-06,
      "loss": 0.6586,
      "step": 140
    },
    {
      "epoch": 1.3778801843317972,
      "grad_norm": 3.1534665864232103,
      "learning_rate": 6.626156749437736e-06,
      "loss": 0.6515,
      "step": 150
    },
    {
      "epoch": 1.4700460829493087,
      "grad_norm": 2.970179982947211,
      "learning_rate": 6.112604669781572e-06,
      "loss": 0.6075,
      "step": 160
    },
    {
      "epoch": 1.5622119815668203,
      "grad_norm": 2.3733689338380763,
      "learning_rate": 5.586360513712011e-06,
      "loss": 0.6335,
      "step": 170
    },
    {
      "epoch": 1.6543778801843319,
      "grad_norm": 2.1969587430398603,
      "learning_rate": 5.053427429716867e-06,
      "loss": 0.6389,
      "step": 180
    },
    {
      "epoch": 1.7465437788018434,
      "grad_norm": 2.5202748926163308,
      "learning_rate": 4.5198848704615915e-06,
      "loss": 0.6368,
      "step": 190
    },
    {
      "epoch": 1.838709677419355,
      "grad_norm": 2.9340441283245204,
      "learning_rate": 3.991819241221836e-06,
      "loss": 0.6449,
      "step": 200
    },
    {
      "epoch": 1.9308755760368663,
      "grad_norm": 2.4639066861245755,
      "learning_rate": 3.475254469003865e-06,
      "loss": 0.6122,
      "step": 210
    },
    {
      "epoch": 2.0184331797235022,
      "grad_norm": 2.2827814391590335,
      "learning_rate": 2.976083284388031e-06,
      "loss": 0.5455,
      "step": 220
    },
    {
      "epoch": 2.110599078341014,
      "grad_norm": 2.5223798401802457,
      "learning_rate": 2.5000000000000015e-06,
      "loss": 0.4434,
      "step": 230
    },
    {
      "epoch": 2.2027649769585254,
      "grad_norm": 2.4224966632128893,
      "learning_rate": 2.0524355524417017e-06,
      "loss": 0.4931,
      "step": 240
    },
    {
      "epoch": 2.294930875576037,
      "grad_norm": 2.632339087070492,
      "learning_rate": 1.6384955486934157e-06,
      "loss": 0.4484,
      "step": 250
    },
    {
      "epoch": 2.3870967741935485,
      "grad_norm": 2.4664514268359046,
      "learning_rate": 1.2629020237248241e-06,
      "loss": 0.4614,
      "step": 260
    },
    {
      "epoch": 2.47926267281106,
      "grad_norm": 2.581208637614147,
      "learning_rate": 9.299395737170758e-07,
      "loss": 0.4306,
      "step": 270
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 2.8466649665216703,
      "learning_rate": 6.43406479383053e-07,
      "loss": 0.4492,
      "step": 280
    },
    {
      "epoch": 2.6635944700460827,
      "grad_norm": 3.008513981213749,
      "learning_rate": 4.0657137694820826e-07,
      "loss": 0.4174,
      "step": 290
    },
    {
      "epoch": 2.7557603686635943,
      "grad_norm": 1.6678227428736063,
      "learning_rate": 2.2213597106929608e-07,
      "loss": 0.4491,
      "step": 300
    },
    {
      "epoch": 2.847926267281106,
      "grad_norm": 2.449161148128379,
      "learning_rate": 9.22042150446728e-08,
      "loss": 0.4539,
      "step": 310
    },
    {
      "epoch": 2.9400921658986174,
      "grad_norm": 2.5954750303050713,
      "learning_rate": 1.8258309893965375e-08,
      "loss": 0.4389,
      "step": 320
    },
    {
      "epoch": 3.0,
      "step": 327,
      "total_flos": 52944191225856.0,
      "train_loss": 0.6752075332384955,
      "train_runtime": 792.8195,
      "train_samples_per_second": 26.185,
      "train_steps_per_second": 0.412
    }
  ],
  "logging_steps": 10,
  "max_steps": 327,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 52944191225856.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}