{
  "best_metric": 0.6549891829490662,
  "best_model_checkpoint": "/qumulo/haoyan/DeepSeek-R1-Distill-Qwen-1.5B-full-fine-tuned/checkpoint-400",
  "epoch": 1.9230769230769231,
  "eval_steps": 20,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09615384615384616,
      "grad_norm": 5.49358606338501,
      "learning_rate": 1.9230769230769234e-06,
      "loss": 1.4696,
      "mean_token_accuracy": 0.6305747523903846,
      "step": 20
    },
    {
      "epoch": 0.09615384615384616,
      "eval_loss": 1.4054043292999268,
      "eval_mean_token_accuracy": 0.6401512276741766,
      "eval_runtime": 8.9366,
      "eval_samples_per_second": 55.502,
      "eval_steps_per_second": 6.938,
      "step": 20
    },
    {
      "epoch": 0.19230769230769232,
      "grad_norm": 3.203395128250122,
      "learning_rate": 3.846153846153847e-06,
      "loss": 1.2191,
      "mean_token_accuracy": 0.6676446132361888,
      "step": 40
    },
    {
      "epoch": 0.19230769230769232,
      "eval_loss": 1.074661135673523,
      "eval_mean_token_accuracy": 0.6990310501667761,
      "eval_runtime": 8.234,
      "eval_samples_per_second": 60.238,
      "eval_steps_per_second": 7.53,
      "step": 40
    },
    {
      "epoch": 0.28846153846153844,
      "grad_norm": 2.2364749908447266,
      "learning_rate": 5.769230769230769e-06,
      "loss": 0.939,
      "mean_token_accuracy": 0.7283279053866863,
      "step": 60
    },
    {
      "epoch": 0.28846153846153844,
      "eval_loss": 0.9099335670471191,
      "eval_mean_token_accuracy": 0.7336987901118494,
      "eval_runtime": 8.2328,
      "eval_samples_per_second": 60.247,
      "eval_steps_per_second": 7.531,
      "step": 60
    },
    {
      "epoch": 0.38461538461538464,
      "grad_norm": 1.727778434753418,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.7966,
      "mean_token_accuracy": 0.7608079120516777,
      "step": 80
    },
    {
      "epoch": 0.38461538461538464,
      "eval_loss": 0.8269181847572327,
      "eval_mean_token_accuracy": 0.7525214870129863,
      "eval_runtime": 8.2272,
      "eval_samples_per_second": 60.288,
      "eval_steps_per_second": 7.536,
      "step": 80
    },
    {
      "epoch": 0.4807692307692308,
      "grad_norm": 2.006840229034424,
      "learning_rate": 9.615384615384616e-06,
      "loss": 0.7144,
      "mean_token_accuracy": 0.7767829544842243,
      "step": 100
    },
    {
      "epoch": 0.4807692307692308,
      "eval_loss": 0.7817137241363525,
      "eval_mean_token_accuracy": 0.7639806982009641,
      "eval_runtime": 8.2296,
      "eval_samples_per_second": 60.27,
      "eval_steps_per_second": 7.534,
      "step": 100
    },
    {
      "epoch": 0.5769230769230769,
      "grad_norm": 2.0230026245117188,
      "learning_rate": 9.992791852820709e-06,
      "loss": 0.7462,
      "mean_token_accuracy": 0.7766949005424977,
      "step": 120
    },
    {
      "epoch": 0.5769230769230769,
      "eval_loss": 0.7536750435829163,
      "eval_mean_token_accuracy": 0.7699809314743165,
      "eval_runtime": 8.2279,
      "eval_samples_per_second": 60.282,
      "eval_steps_per_second": 7.535,
      "step": 120
    },
    {
      "epoch": 0.6730769230769231,
      "grad_norm": 1.7768949270248413,
      "learning_rate": 9.96354437049027e-06,
      "loss": 0.6794,
      "mean_token_accuracy": 0.7899795390665532,
      "step": 140
    },
    {
      "epoch": 0.6730769230769231,
      "eval_loss": 0.7344855070114136,
      "eval_mean_token_accuracy": 0.7744387965048513,
      "eval_runtime": 8.2306,
      "eval_samples_per_second": 60.263,
      "eval_steps_per_second": 7.533,
      "step": 140
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 1.60780930519104,
      "learning_rate": 9.911938687078324e-06,
      "loss": 0.667,
      "mean_token_accuracy": 0.7936442479491234,
      "step": 160
    },
    {
      "epoch": 0.7692307692307693,
      "eval_loss": 0.7175356149673462,
      "eval_mean_token_accuracy": 0.778878273502473,
      "eval_runtime": 8.2259,
      "eval_samples_per_second": 60.298,
      "eval_steps_per_second": 7.537,
      "step": 160
    },
    {
      "epoch": 0.8653846153846154,
      "grad_norm": 1.7980819940567017,
      "learning_rate": 9.838207259506891e-06,
      "loss": 0.6657,
      "mean_token_accuracy": 0.7941531106829643,
      "step": 180
    },
    {
      "epoch": 0.8653846153846154,
      "eval_loss": 0.7041124105453491,
      "eval_mean_token_accuracy": 0.7811994004634119,
      "eval_runtime": 8.2296,
      "eval_samples_per_second": 60.271,
      "eval_steps_per_second": 7.534,
      "step": 180
    },
    {
      "epoch": 0.9615384615384616,
      "grad_norm": 1.807111144065857,
      "learning_rate": 9.742682209735727e-06,
      "loss": 0.6299,
      "mean_token_accuracy": 0.7983144462108612,
      "step": 200
    },
    {
      "epoch": 0.9615384615384616,
      "eval_loss": 0.6941556334495544,
      "eval_mean_token_accuracy": 0.7831099206401456,
      "eval_runtime": 8.2295,
      "eval_samples_per_second": 60.271,
      "eval_steps_per_second": 7.534,
      "step": 200
    },
    {
      "epoch": 1.0576923076923077,
      "grad_norm": 1.7085195779800415,
      "learning_rate": 9.62579382872462e-06,
      "loss": 0.5774,
      "mean_token_accuracy": 0.8153253301978112,
      "step": 220
    },
    {
      "epoch": 1.0576923076923077,
      "eval_loss": 0.6902210116386414,
      "eval_mean_token_accuracy": 0.7860495448112488,
      "eval_runtime": 8.2283,
      "eval_samples_per_second": 60.28,
      "eval_steps_per_second": 7.535,
      "step": 220
    },
    {
      "epoch": 1.1538461538461537,
      "grad_norm": 1.8473504781723022,
      "learning_rate": 9.488068638195072e-06,
      "loss": 0.5244,
      "mean_token_accuracy": 0.8304496161639691,
      "step": 240
    },
    {
      "epoch": 1.1538461538461537,
      "eval_loss": 0.6881601214408875,
      "eval_mean_token_accuracy": 0.7877127287849304,
      "eval_runtime": 8.2278,
      "eval_samples_per_second": 60.283,
      "eval_steps_per_second": 7.535,
      "step": 240
    },
    {
      "epoch": 1.25,
      "grad_norm": 1.7634131908416748,
      "learning_rate": 9.330127018922195e-06,
      "loss": 0.546,
      "mean_token_accuracy": 0.8232163846492767,
      "step": 260
    },
    {
      "epoch": 1.25,
      "eval_loss": 0.6814642548561096,
      "eval_mean_token_accuracy": 0.7887057575487322,
      "eval_runtime": 8.2245,
      "eval_samples_per_second": 60.307,
      "eval_steps_per_second": 7.538,
      "step": 260
    },
    {
      "epoch": 1.3461538461538463,
      "grad_norm": 1.8421896696090698,
      "learning_rate": 9.152680416240059e-06,
      "loss": 0.5208,
      "mean_token_accuracy": 0.8317944325506688,
      "step": 280
    },
    {
      "epoch": 1.3461538461538463,
      "eval_loss": 0.6732472777366638,
      "eval_mean_token_accuracy": 0.7908325656767814,
      "eval_runtime": 8.2257,
      "eval_samples_per_second": 60.299,
      "eval_steps_per_second": 7.537,
      "step": 280
    },
    {
      "epoch": 1.4423076923076923,
      "grad_norm": 1.7011970281600952,
      "learning_rate": 8.95652813534831e-06,
      "loss": 0.5174,
      "mean_token_accuracy": 0.8322063274681568,
      "step": 300
    },
    {
      "epoch": 1.4423076923076923,
      "eval_loss": 0.6705421805381775,
      "eval_mean_token_accuracy": 0.7914758151577365,
      "eval_runtime": 8.2295,
      "eval_samples_per_second": 60.271,
      "eval_steps_per_second": 7.534,
      "step": 300
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 1.6709781885147095,
      "learning_rate": 8.742553740855507e-06,
      "loss": 0.4951,
      "mean_token_accuracy": 0.8397955119609832,
      "step": 320
    },
    {
      "epoch": 1.5384615384615383,
      "eval_loss": 0.6695953011512756,
      "eval_mean_token_accuracy": 0.7922829043480658,
      "eval_runtime": 8.2274,
      "eval_samples_per_second": 60.287,
      "eval_steps_per_second": 7.536,
      "step": 320
    },
    {
      "epoch": 1.6346153846153846,
      "grad_norm": 1.6190528869628906,
      "learning_rate": 8.511721076777388e-06,
      "loss": 0.4931,
      "mean_token_accuracy": 0.838988920301199,
      "step": 340
    },
    {
      "epoch": 1.6346153846153846,
      "eval_loss": 0.6621582508087158,
      "eval_mean_token_accuracy": 0.7939039574515435,
      "eval_runtime": 8.2273,
      "eval_samples_per_second": 60.287,
      "eval_steps_per_second": 7.536,
      "step": 340
    },
    {
      "epoch": 1.7307692307692308,
      "grad_norm": 1.90789794921875,
      "learning_rate": 8.265069924917925e-06,
      "loss": 0.4777,
      "mean_token_accuracy": 0.8394413135945797,
      "step": 360
    },
    {
      "epoch": 1.7307692307692308,
      "eval_loss": 0.6622117757797241,
      "eval_mean_token_accuracy": 0.7936465576771767,
      "eval_runtime": 8.2226,
      "eval_samples_per_second": 60.321,
      "eval_steps_per_second": 7.54,
      "step": 360
    },
    {
      "epoch": 1.8269230769230769,
      "grad_norm": 1.803838849067688,
      "learning_rate": 8.003711321189895e-06,
      "loss": 0.4876,
      "mean_token_accuracy": 0.837741208076477,
      "step": 380
    },
    {
      "epoch": 1.8269230769230769,
      "eval_loss": 0.6593651175498962,
      "eval_mean_token_accuracy": 0.7955336032375213,
      "eval_runtime": 8.2287,
      "eval_samples_per_second": 60.277,
      "eval_steps_per_second": 7.535,
      "step": 380
    },
    {
      "epoch": 1.9230769230769231,
      "grad_norm": 1.4846165180206299,
      "learning_rate": 7.728822550972523e-06,
      "loss": 0.4966,
      "mean_token_accuracy": 0.8370955176651478,
      "step": 400
    },
    {
      "epoch": 1.9230769230769231,
      "eval_loss": 0.6549891829490662,
      "eval_mean_token_accuracy": 0.7954852792524523,
      "eval_runtime": 8.2285,
      "eval_samples_per_second": 60.278,
      "eval_steps_per_second": 7.535,
      "step": 400
    }
  ],
  "logging_steps": 20,
  "max_steps": 1040,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 20,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.789277068106138e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}