{
  "best_global_step": 200,
  "best_metric": 1.0429821014404297,
  "best_model_checkpoint": "models/MNLP_M3_rag_model/checkpoint-200",
  "epoch": 1.04177545691906,
  "eval_steps": 200,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05221932114882506,
      "grad_norm": 388.6032409667969,
      "learning_rate": 2.7586206896551725e-06,
      "loss": 11.4444,
      "step": 10
    },
    {
      "epoch": 0.10443864229765012,
      "grad_norm": 16.180984497070312,
      "learning_rate": 9.655172413793105e-06,
      "loss": 1.9209,
      "step": 20
    },
    {
      "epoch": 0.1566579634464752,
      "grad_norm": 77.73260498046875,
      "learning_rate": 1.6551724137931037e-05,
      "loss": 1.3128,
      "step": 30
    },
    {
      "epoch": 0.20887728459530025,
      "grad_norm": 103.49502563476562,
      "learning_rate": 1.981617647058824e-05,
      "loss": 0.7444,
      "step": 40
    },
    {
      "epoch": 0.26109660574412535,
      "grad_norm": 71.25463104248047,
      "learning_rate": 1.9448529411764706e-05,
      "loss": 0.7653,
      "step": 50
    },
    {
      "epoch": 0.3133159268929504,
      "grad_norm": 168.3430633544922,
      "learning_rate": 1.908088235294118e-05,
      "loss": 0.6154,
      "step": 60
    },
    {
      "epoch": 0.36553524804177545,
      "grad_norm": 46.75175857543945,
      "learning_rate": 1.8713235294117647e-05,
      "loss": 0.9861,
      "step": 70
    },
    {
      "epoch": 0.4177545691906005,
      "grad_norm": 45.60316467285156,
      "learning_rate": 1.8345588235294118e-05,
      "loss": 0.9699,
      "step": 80
    },
    {
      "epoch": 0.4699738903394256,
      "grad_norm": 72.48204040527344,
      "learning_rate": 1.7977941176470592e-05,
      "loss": 0.7536,
      "step": 90
    },
    {
      "epoch": 0.5221932114882507,
      "grad_norm": 125.96574401855469,
      "learning_rate": 1.761029411764706e-05,
      "loss": 0.55,
      "step": 100
    },
    {
      "epoch": 0.5744125326370757,
      "grad_norm": 25.672470092773438,
      "learning_rate": 1.7242647058823533e-05,
      "loss": 1.3258,
      "step": 110
    },
    {
      "epoch": 0.6266318537859008,
      "grad_norm": 82.88054656982422,
      "learning_rate": 1.6875e-05,
      "loss": 0.7496,
      "step": 120
    },
    {
      "epoch": 0.6788511749347258,
      "grad_norm": 66.79105377197266,
      "learning_rate": 1.650735294117647e-05,
      "loss": 0.7877,
      "step": 130
    },
    {
      "epoch": 0.7310704960835509,
      "grad_norm": 99.85301971435547,
      "learning_rate": 1.613970588235294e-05,
      "loss": 0.6072,
      "step": 140
    },
    {
      "epoch": 0.783289817232376,
      "grad_norm": 112.50544738769531,
      "learning_rate": 1.5772058823529412e-05,
      "loss": 0.6097,
      "step": 150
    },
    {
      "epoch": 0.835509138381201,
      "grad_norm": 114.9411849975586,
      "learning_rate": 1.5404411764705886e-05,
      "loss": 0.6873,
      "step": 160
    },
    {
      "epoch": 0.8877284595300261,
      "grad_norm": 129.22744750976562,
      "learning_rate": 1.5036764705882353e-05,
      "loss": 1.1041,
      "step": 170
    },
    {
      "epoch": 0.9399477806788512,
      "grad_norm": 43.67152786254883,
      "learning_rate": 1.4669117647058824e-05,
      "loss": 0.7666,
      "step": 180
    },
    {
      "epoch": 0.9921671018276762,
      "grad_norm": 53.90073013305664,
      "learning_rate": 1.4301470588235296e-05,
      "loss": 0.6146,
      "step": 190
    },
    {
      "epoch": 1.04177545691906,
      "grad_norm": 279.6166076660156,
      "learning_rate": 1.3933823529411765e-05,
      "loss": 0.2116,
      "step": 200
    },
    {
      "epoch": 1.04177545691906,
      "eval_loss": 1.0429821014404297,
      "eval_runtime": 4.8371,
      "eval_samples_per_second": 35.351,
      "eval_steps_per_second": 4.548,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 573,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2158220059607040.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}