{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.12410027302060064,
  "eval_steps": 1000,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0062050136510300325,
      "grad_norm": 101.88961029052734,
      "learning_rate": 0.0001,
      "loss": 4.5088,
      "step": 100
    },
    {
      "epoch": 0.012410027302060065,
      "grad_norm": 99.7363510131836,
      "learning_rate": 9.999038127056248e-05,
      "loss": 0.8644,
      "step": 200
    },
    {
      "epoch": 0.018615040953090096,
      "grad_norm": 59.41786575317383,
      "learning_rate": 9.996152878304816e-05,
      "loss": 0.7189,
      "step": 300
    },
    {
      "epoch": 0.02482005460412013,
      "grad_norm": 61.05416488647461,
      "learning_rate": 9.991345363842789e-05,
      "loss": 0.6253,
      "step": 400
    },
    {
      "epoch": 0.03102506825515016,
      "grad_norm": 53.87808609008789,
      "learning_rate": 9.9846174333574e-05,
      "loss": 0.5863,
      "step": 500
    },
    {
      "epoch": 0.03723008190618019,
      "grad_norm": 48.58293914794922,
      "learning_rate": 9.975971675414371e-05,
      "loss": 0.5555,
      "step": 600
    },
    {
      "epoch": 0.04343509555721023,
      "grad_norm": 42.353153228759766,
      "learning_rate": 9.965411416461959e-05,
      "loss": 0.5566,
      "step": 700
    },
    {
      "epoch": 0.04964010920824026,
      "grad_norm": 46.06748580932617,
      "learning_rate": 9.952940719551112e-05,
      "loss": 0.5425,
      "step": 800
    },
    {
      "epoch": 0.05584512285927029,
      "grad_norm": 46.486637115478516,
      "learning_rate": 9.938564382772205e-05,
      "loss": 0.5004,
      "step": 900
    },
    {
      "epoch": 0.06205013651030032,
      "grad_norm": 43.99421691894531,
      "learning_rate": 9.922287937408994e-05,
      "loss": 0.4722,
      "step": 1000
    },
    {
      "epoch": 0.06205013651030032,
      "eval_avg_non_pair_similarity": -0.00011276310920446573,
      "eval_avg_pair_similarity": 0.0020758428336121143,
      "eval_loss": 0.44835320115089417,
      "eval_runtime": 52.6112,
      "eval_samples_per_second": 9.504,
      "eval_similarity_ratio": -18.408882552609725,
      "eval_steps_per_second": 0.304,
      "step": 1000
    },
    {
      "epoch": 0.06825515016133035,
      "grad_norm": 38.14698791503906,
      "learning_rate": 9.904117645810441e-05,
      "loss": 0.4444,
      "step": 1100
    },
    {
      "epoch": 0.07446016381236038,
      "grad_norm": 29.508113861083984,
      "learning_rate": 9.884060498981296e-05,
      "loss": 0.4615,
      "step": 1200
    },
    {
      "epoch": 0.08066517746339041,
      "grad_norm": 34.64622497558594,
      "learning_rate": 9.862124213892304e-05,
      "loss": 0.4665,
      "step": 1300
    },
    {
      "epoch": 0.08687019111442046,
      "grad_norm": 40.75010299682617,
      "learning_rate": 9.838317230511112e-05,
      "loss": 0.4275,
      "step": 1400
    },
    {
      "epoch": 0.09307520476545049,
      "grad_norm": 28.284589767456055,
      "learning_rate": 9.81264870855499e-05,
      "loss": 0.4129,
      "step": 1500
    },
    {
      "epoch": 0.09928021841648052,
      "grad_norm": 34.2197151184082,
      "learning_rate": 9.785128523966653e-05,
      "loss": 0.4389,
      "step": 1600
    },
    {
      "epoch": 0.10548523206751055,
      "grad_norm": 27.808895111083984,
      "learning_rate": 9.755767265114484e-05,
      "loss": 0.4184,
      "step": 1700
    },
    {
      "epoch": 0.11169024571854058,
      "grad_norm": 35.00907897949219,
      "learning_rate": 9.724576228718678e-05,
      "loss": 0.3995,
      "step": 1800
    },
    {
      "epoch": 0.11789525936957061,
      "grad_norm": 30.57769203186035,
      "learning_rate": 9.691567415504832e-05,
      "loss": 0.415,
      "step": 1900
    },
    {
      "epoch": 0.12410027302060064,
      "grad_norm": 26.989404678344727,
      "learning_rate": 9.656753525586681e-05,
      "loss": 0.4052,
      "step": 2000
    },
    {
      "epoch": 0.12410027302060064,
      "eval_avg_non_pair_similarity": 0.0006865022985091086,
      "eval_avg_pair_similarity": 0.007665629971772433,
      "eval_loss": 0.3601702153682709,
      "eval_runtime": 52.565,
      "eval_samples_per_second": 9.512,
      "eval_similarity_ratio": 11.166211662247951,
      "eval_steps_per_second": 0.304,
      "step": 2000
    }
  ],
  "logging_steps": 100,
  "max_steps": 16116,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}