{
  "best_global_step": 400,
  "best_metric": 1.5967378616333008,
  "best_model_checkpoint": "models/MNLP_M3_rag_model_test/checkpoint-400",
  "epoch": 2.08355091383812,
  "eval_steps": 200,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05221932114882506,
      "grad_norm": 7.428971767425537,
      "learning_rate": 6.206896551724138e-06,
      "loss": 3.5132,
      "step": 10
    },
    {
      "epoch": 0.10443864229765012,
      "grad_norm": 6.059654712677002,
      "learning_rate": 1.310344827586207e-05,
      "loss": 3.0338,
      "step": 20
    },
    {
      "epoch": 0.1566579634464752,
      "grad_norm": 5.7227091789245605,
      "learning_rate": 2e-05,
      "loss": 2.9247,
      "step": 30
    },
    {
      "epoch": 0.20887728459530025,
      "grad_norm": 6.097289085388184,
      "learning_rate": 1.9632352941176472e-05,
      "loss": 2.7247,
      "step": 40
    },
    {
      "epoch": 0.26109660574412535,
      "grad_norm": 6.7676849365234375,
      "learning_rate": 1.9264705882352943e-05,
      "loss": 2.649,
      "step": 50
    },
    {
      "epoch": 0.3133159268929504,
      "grad_norm": 6.696221351623535,
      "learning_rate": 1.8897058823529413e-05,
      "loss": 2.5119,
      "step": 60
    },
    {
      "epoch": 0.36553524804177545,
      "grad_norm": 6.6193108558654785,
      "learning_rate": 1.8529411764705884e-05,
      "loss": 2.482,
      "step": 70
    },
    {
      "epoch": 0.4177545691906005,
      "grad_norm": 6.831273555755615,
      "learning_rate": 1.8161764705882355e-05,
      "loss": 2.3116,
      "step": 80
    },
    {
      "epoch": 0.4699738903394256,
      "grad_norm": 6.834325790405273,
      "learning_rate": 1.783088235294118e-05,
      "loss": 2.2966,
      "step": 90
    },
    {
      "epoch": 0.5221932114882507,
      "grad_norm": 6.186923027038574,
      "learning_rate": 1.7463235294117647e-05,
      "loss": 2.1808,
      "step": 100
    },
    {
      "epoch": 0.5744125326370757,
      "grad_norm": 5.660542964935303,
      "learning_rate": 1.7095588235294118e-05,
      "loss": 2.0213,
      "step": 110
    },
    {
      "epoch": 0.6266318537859008,
      "grad_norm": 7.235581398010254,
      "learning_rate": 1.672794117647059e-05,
      "loss": 1.9806,
      "step": 120
    },
    {
      "epoch": 0.6788511749347258,
      "grad_norm": 5.797588348388672,
      "learning_rate": 1.636029411764706e-05,
      "loss": 2.0965,
      "step": 130
    },
    {
      "epoch": 0.7310704960835509,
      "grad_norm": 7.213879585266113,
      "learning_rate": 1.5992647058823533e-05,
      "loss": 2.075,
      "step": 140
    },
    {
      "epoch": 0.783289817232376,
      "grad_norm": 6.168433666229248,
      "learning_rate": 1.5625e-05,
      "loss": 2.0189,
      "step": 150
    },
    {
      "epoch": 0.835509138381201,
      "grad_norm": 6.239176273345947,
      "learning_rate": 1.5257352941176471e-05,
      "loss": 1.918,
      "step": 160
    },
    {
      "epoch": 0.8877284595300261,
      "grad_norm": 6.878653526306152,
      "learning_rate": 1.4889705882352943e-05,
      "loss": 2.1624,
      "step": 170
    },
    {
      "epoch": 0.9399477806788512,
      "grad_norm": 5.984134197235107,
      "learning_rate": 1.4522058823529412e-05,
      "loss": 1.7616,
      "step": 180
    },
    {
      "epoch": 0.9921671018276762,
      "grad_norm": 6.3687825202941895,
      "learning_rate": 1.4154411764705885e-05,
      "loss": 1.902,
      "step": 190
    },
    {
      "epoch": 1.04177545691906,
      "grad_norm": 8.597973823547363,
      "learning_rate": 1.3786764705882353e-05,
      "loss": 1.5014,
      "step": 200
    },
    {
      "epoch": 1.04177545691906,
      "eval_loss": 1.7825424671173096,
      "eval_runtime": 4.8649,
      "eval_samples_per_second": 35.15,
      "eval_steps_per_second": 4.522,
      "step": 200
    },
    {
      "epoch": 1.0939947780678851,
      "grad_norm": 6.6663336753845215,
      "learning_rate": 1.3419117647058824e-05,
      "loss": 1.3655,
      "step": 210
    },
    {
      "epoch": 1.1462140992167102,
      "grad_norm": 6.472996711730957,
      "learning_rate": 1.3051470588235295e-05,
      "loss": 1.4386,
      "step": 220
    },
    {
      "epoch": 1.1984334203655354,
      "grad_norm": 6.847625255584717,
      "learning_rate": 1.2683823529411765e-05,
      "loss": 1.265,
      "step": 230
    },
    {
      "epoch": 1.2506527415143602,
      "grad_norm": 8.223856925964355,
      "learning_rate": 1.2316176470588238e-05,
      "loss": 1.39,
      "step": 240
    },
    {
      "epoch": 1.3028720626631853,
      "grad_norm": 6.357968807220459,
      "learning_rate": 1.1948529411764707e-05,
      "loss": 1.3307,
      "step": 250
    },
    {
      "epoch": 1.3550913838120104,
      "grad_norm": 5.952621936798096,
      "learning_rate": 1.1580882352941179e-05,
      "loss": 1.3974,
      "step": 260
    },
    {
      "epoch": 1.4073107049608355,
      "grad_norm": 6.150119781494141,
      "learning_rate": 1.1213235294117648e-05,
      "loss": 1.2827,
      "step": 270
    },
    {
      "epoch": 1.4595300261096606,
      "grad_norm": 7.437598705291748,
      "learning_rate": 1.0845588235294118e-05,
      "loss": 1.4003,
      "step": 280
    },
    {
      "epoch": 1.5117493472584855,
      "grad_norm": 6.825808525085449,
      "learning_rate": 1.0477941176470589e-05,
      "loss": 1.2258,
      "step": 290
    },
    {
      "epoch": 1.5639686684073109,
      "grad_norm": 6.366878986358643,
      "learning_rate": 1.011029411764706e-05,
      "loss": 1.3887,
      "step": 300
    },
    {
      "epoch": 1.6161879895561357,
      "grad_norm": 6.928258895874023,
      "learning_rate": 9.74264705882353e-06,
      "loss": 1.2123,
      "step": 310
    },
    {
      "epoch": 1.6684073107049608,
      "grad_norm": 6.122648239135742,
      "learning_rate": 9.375000000000001e-06,
      "loss": 1.239,
      "step": 320
    },
    {
      "epoch": 1.720626631853786,
      "grad_norm": 6.110084056854248,
      "learning_rate": 9.007352941176471e-06,
      "loss": 1.3535,
      "step": 330
    },
    {
      "epoch": 1.7728459530026108,
      "grad_norm": 7.0101704597473145,
      "learning_rate": 8.639705882352942e-06,
      "loss": 1.3841,
      "step": 340
    },
    {
      "epoch": 1.8250652741514362,
      "grad_norm": 7.713141918182373,
      "learning_rate": 8.272058823529413e-06,
      "loss": 1.429,
      "step": 350
    },
    {
      "epoch": 1.877284595300261,
      "grad_norm": 6.069569110870361,
      "learning_rate": 7.904411764705883e-06,
      "loss": 1.2837,
      "step": 360
    },
    {
      "epoch": 1.9295039164490861,
      "grad_norm": 6.877740383148193,
      "learning_rate": 7.536764705882353e-06,
      "loss": 1.2482,
      "step": 370
    },
    {
      "epoch": 1.9817232375979112,
      "grad_norm": 6.056829929351807,
      "learning_rate": 7.169117647058825e-06,
      "loss": 1.1995,
      "step": 380
    },
    {
      "epoch": 2.031331592689295,
      "grad_norm": 5.879096508026123,
      "learning_rate": 6.801470588235295e-06,
      "loss": 0.9507,
      "step": 390
    },
    {
      "epoch": 2.08355091383812,
      "grad_norm": 6.686075687408447,
      "learning_rate": 6.433823529411766e-06,
      "loss": 0.9641,
      "step": 400
    },
    {
      "epoch": 2.08355091383812,
      "eval_loss": 1.5967378616333008,
      "eval_runtime": 4.6644,
      "eval_samples_per_second": 36.66,
      "eval_steps_per_second": 4.717,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 573,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4316440119214080.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}