{
  "best_metric": 1.4546449184417725,
  "best_model_checkpoint": "tam_test_out_drug_data_large/checkpoint-13442",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 13442,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "grad_norm": 364818.0,
      "learning_rate": 5.7768189257550956e-05,
      "loss": 1.9818,
      "step": 500
    },
    {
      "epoch": 0.07,
      "grad_norm": 400334.53125,
      "learning_rate": 5.5536378515101923e-05,
      "loss": 1.7592,
      "step": 1000
    },
    {
      "epoch": 0.11,
      "grad_norm": 406113.28125,
      "learning_rate": 5.330456777265288e-05,
      "loss": 1.7118,
      "step": 1500
    },
    {
      "epoch": 0.15,
      "grad_norm": 331607.125,
      "learning_rate": 5.1072757030203845e-05,
      "loss": 1.7036,
      "step": 2000
    },
    {
      "epoch": 0.19,
      "grad_norm": 390128.875,
      "learning_rate": 4.88409462877548e-05,
      "loss": 1.6583,
      "step": 2500
    },
    {
      "epoch": 0.22,
      "grad_norm": 339776.03125,
      "learning_rate": 4.6609135545305754e-05,
      "loss": 1.6358,
      "step": 3000
    },
    {
      "epoch": 0.26,
      "grad_norm": 782641.125,
      "learning_rate": 4.437732480285672e-05,
      "loss": 1.5937,
      "step": 3500
    },
    {
      "epoch": 0.3,
      "grad_norm": 600757.25,
      "learning_rate": 4.2145514060407676e-05,
      "loss": 1.6129,
      "step": 4000
    },
    {
      "epoch": 0.33,
      "grad_norm": 275153.03125,
      "learning_rate": 3.9913703317958644e-05,
      "loss": 1.6207,
      "step": 4500
    },
    {
      "epoch": 0.37,
      "grad_norm": 471402.28125,
      "learning_rate": 3.76818925755096e-05,
      "loss": 1.5781,
      "step": 5000
    },
    {
      "epoch": 0.41,
      "grad_norm": 225413.765625,
      "learning_rate": 3.545008183306055e-05,
      "loss": 1.576,
      "step": 5500
    },
    {
      "epoch": 0.45,
      "grad_norm": 553825.5,
      "learning_rate": 3.321827109061152e-05,
      "loss": 1.5546,
      "step": 6000
    },
    {
      "epoch": 0.48,
      "grad_norm": 228419.859375,
      "learning_rate": 3.0986460348162474e-05,
      "loss": 1.5777,
      "step": 6500
    },
    {
      "epoch": 0.52,
      "grad_norm": 461608.46875,
      "learning_rate": 2.8754649605713435e-05,
      "loss": 1.5543,
      "step": 7000
    },
    {
      "epoch": 0.56,
      "grad_norm": 303856.875,
      "learning_rate": 2.6522838863264396e-05,
      "loss": 1.5475,
      "step": 7500
    },
    {
      "epoch": 0.6,
      "grad_norm": 366325.34375,
      "learning_rate": 2.4291028120815357e-05,
      "loss": 1.5295,
      "step": 8000
    },
    {
      "epoch": 0.63,
      "grad_norm": 407897.65625,
      "learning_rate": 2.2059217378366318e-05,
      "loss": 1.5193,
      "step": 8500
    },
    {
      "epoch": 0.67,
      "grad_norm": 359063.25,
      "learning_rate": 1.9827406635917272e-05,
      "loss": 1.5173,
      "step": 9000
    },
    {
      "epoch": 0.71,
      "grad_norm": 632423.8125,
      "learning_rate": 1.7595595893468233e-05,
      "loss": 1.497,
      "step": 9500
    },
    {
      "epoch": 0.74,
      "grad_norm": 428668.46875,
      "learning_rate": 1.5363785151019194e-05,
      "loss": 1.5284,
      "step": 10000
    },
    {
      "epoch": 0.78,
      "grad_norm": 406600.40625,
      "learning_rate": 1.3131974408570155e-05,
      "loss": 1.4979,
      "step": 10500
    },
    {
      "epoch": 0.82,
      "grad_norm": 353293.84375,
      "learning_rate": 1.0900163666121113e-05,
      "loss": 1.4957,
      "step": 11000
    },
    {
      "epoch": 0.86,
      "grad_norm": 281095.96875,
      "learning_rate": 8.668352923672074e-06,
      "loss": 1.4742,
      "step": 11500
    },
    {
      "epoch": 0.89,
      "grad_norm": 332004.0625,
      "learning_rate": 6.436542181223033e-06,
      "loss": 1.4797,
      "step": 12000
    },
    {
      "epoch": 0.93,
      "grad_norm": 299908.8125,
      "learning_rate": 4.204731438773992e-06,
      "loss": 1.47,
      "step": 12500
    },
    {
      "epoch": 0.97,
      "grad_norm": 487343.84375,
      "learning_rate": 1.972920696324952e-06,
      "loss": 1.4685,
      "step": 13000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.4630335720264112,
      "eval_loss": 1.4546449184417725,
      "eval_runtime": 302.6957,
      "eval_samples_per_second": 35.524,
      "eval_steps_per_second": 2.963,
      "step": 13442
    }
  ],
  "logging_steps": 500,
  "max_steps": 13442,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 2.945281604340096e+17,
  "train_batch_size": 12,
  "trial_name": null,
  "trial_params": null
}