{
  "best_global_step": 2076,
  "best_metric": 0.5154462456703186,
  "best_model_checkpoint": "./mcqa_qwen3_letter_alex_3/checkpoint-2076",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 3114,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04819277108433735,
      "grad_norm": 127.03376007080078,
      "learning_rate": 5.301204819277108e-08,
      "loss": 0.8688,
      "step": 50
    },
    {
      "epoch": 0.0963855421686747,
      "grad_norm": 78.18746948242188,
      "learning_rate": 1.1325301204819277e-07,
      "loss": 0.7444,
      "step": 100
    },
    {
      "epoch": 0.14457831325301204,
      "grad_norm": 71.2592544555664,
      "learning_rate": 1.7349397590361445e-07,
      "loss": 0.6279,
      "step": 150
    },
    {
      "epoch": 0.1927710843373494,
      "grad_norm": 60.21193313598633,
      "learning_rate": 2.3373493975903614e-07,
      "loss": 0.585,
      "step": 200
    },
    {
      "epoch": 0.24096385542168675,
      "grad_norm": 47.37508773803711,
      "learning_rate": 2.939759036144578e-07,
      "loss": 0.6807,
      "step": 250
    },
    {
      "epoch": 0.2891566265060241,
      "grad_norm": 64.76005554199219,
      "learning_rate": 3.542168674698795e-07,
      "loss": 0.6285,
      "step": 300
    },
    {
      "epoch": 0.3373493975903614,
      "grad_norm": 55.05120849609375,
      "learning_rate": 4.144578313253012e-07,
      "loss": 0.6387,
      "step": 350
    },
    {
      "epoch": 0.3855421686746988,
      "grad_norm": 65.12383270263672,
      "learning_rate": 4.7469879518072285e-07,
      "loss": 0.6121,
      "step": 400
    },
    {
      "epoch": 0.43373493975903615,
      "grad_norm": 51.33403778076172,
      "learning_rate": 4.961157246182694e-07,
      "loss": 0.5945,
      "step": 450
    },
    {
      "epoch": 0.4819277108433735,
      "grad_norm": 48.93183135986328,
      "learning_rate": 4.894186980980445e-07,
      "loss": 0.5771,
      "step": 500
    },
    {
      "epoch": 0.5301204819277109,
      "grad_norm": 59.7462043762207,
      "learning_rate": 4.827216715778194e-07,
      "loss": 0.6032,
      "step": 550
    },
    {
      "epoch": 0.5783132530120482,
      "grad_norm": 56.27853012084961,
      "learning_rate": 4.7602464505759444e-07,
      "loss": 0.5756,
      "step": 600
    },
    {
      "epoch": 0.6265060240963856,
      "grad_norm": 44.396385192871094,
      "learning_rate": 4.693276185373694e-07,
      "loss": 0.6414,
      "step": 650
    },
    {
      "epoch": 0.6746987951807228,
      "grad_norm": 85.11759948730469,
      "learning_rate": 4.626305920171444e-07,
      "loss": 0.5887,
      "step": 700
    },
    {
      "epoch": 0.7228915662650602,
      "grad_norm": 48.39686584472656,
      "learning_rate": 4.5593356549691934e-07,
      "loss": 0.5433,
      "step": 750
    },
    {
      "epoch": 0.7710843373493976,
      "grad_norm": 51.4890022277832,
      "learning_rate": 4.492365389766943e-07,
      "loss": 0.5134,
      "step": 800
    },
    {
      "epoch": 0.8192771084337349,
      "grad_norm": 92.15757751464844,
      "learning_rate": 4.425395124564693e-07,
      "loss": 0.5641,
      "step": 850
    },
    {
      "epoch": 0.8674698795180723,
      "grad_norm": 49.211238861083984,
      "learning_rate": 4.358424859362443e-07,
      "loss": 0.5371,
      "step": 900
    },
    {
      "epoch": 0.9156626506024096,
      "grad_norm": 46.73378372192383,
      "learning_rate": 4.2914545941601927e-07,
      "loss": 0.5762,
      "step": 950
    },
    {
      "epoch": 0.963855421686747,
      "grad_norm": 62.20621871948242,
      "learning_rate": 4.2244843289579426e-07,
      "loss": 0.505,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.5167326331138611,
      "eval_runtime": 68.7565,
      "eval_samples_per_second": 45.247,
      "eval_steps_per_second": 5.658,
      "step": 1038
    },
    {
      "epoch": 1.011566265060241,
      "grad_norm": 29.95050621032715,
      "learning_rate": 4.158853469059737e-07,
      "loss": 0.4691,
      "step": 1050
    },
    {
      "epoch": 1.0597590361445783,
      "grad_norm": 44.59640884399414,
      "learning_rate": 4.0918832038574867e-07,
      "loss": 0.3975,
      "step": 1100
    },
    {
      "epoch": 1.1079518072289156,
      "grad_norm": 37.64362716674805,
      "learning_rate": 4.0249129386552365e-07,
      "loss": 0.3943,
      "step": 1150
    },
    {
      "epoch": 1.1561445783132531,
      "grad_norm": 61.94697189331055,
      "learning_rate": 3.957942673452987e-07,
      "loss": 0.4067,
      "step": 1200
    },
    {
      "epoch": 1.2043373493975904,
      "grad_norm": 46.150638580322266,
      "learning_rate": 3.890972408250737e-07,
      "loss": 0.3821,
      "step": 1250
    },
    {
      "epoch": 1.2525301204819277,
      "grad_norm": 48.126644134521484,
      "learning_rate": 3.8240021430484866e-07,
      "loss": 0.3627,
      "step": 1300
    },
    {
      "epoch": 1.3007228915662652,
      "grad_norm": 60.339027404785156,
      "learning_rate": 3.7570318778462364e-07,
      "loss": 0.4397,
      "step": 1350
    },
    {
      "epoch": 1.3489156626506025,
      "grad_norm": 59.592315673828125,
      "learning_rate": 3.6900616126439857e-07,
      "loss": 0.4537,
      "step": 1400
    },
    {
      "epoch": 1.3971084337349398,
      "grad_norm": 62.792755126953125,
      "learning_rate": 3.6230913474417356e-07,
      "loss": 0.4125,
      "step": 1450
    },
    {
      "epoch": 1.445301204819277,
      "grad_norm": 42.56241226196289,
      "learning_rate": 3.5561210822394854e-07,
      "loss": 0.3997,
      "step": 1500
    },
    {
      "epoch": 1.4934939759036143,
      "grad_norm": 63.69731903076172,
      "learning_rate": 3.489150817037235e-07,
      "loss": 0.4361,
      "step": 1550
    },
    {
      "epoch": 1.5416867469879518,
      "grad_norm": 54.29156494140625,
      "learning_rate": 3.422180551834985e-07,
      "loss": 0.3645,
      "step": 1600
    },
    {
      "epoch": 1.589879518072289,
      "grad_norm": 71.76103210449219,
      "learning_rate": 3.355210286632735e-07,
      "loss": 0.3867,
      "step": 1650
    },
    {
      "epoch": 1.6380722891566264,
      "grad_norm": 57.209815979003906,
      "learning_rate": 3.2882400214304847e-07,
      "loss": 0.4207,
      "step": 1700
    },
    {
      "epoch": 1.686265060240964,
      "grad_norm": 66.55210876464844,
      "learning_rate": 3.221269756228234e-07,
      "loss": 0.3592,
      "step": 1750
    },
    {
      "epoch": 1.7344578313253012,
      "grad_norm": 73.36932373046875,
      "learning_rate": 3.1542994910259844e-07,
      "loss": 0.3515,
      "step": 1800
    },
    {
      "epoch": 1.7826506024096385,
      "grad_norm": 92.81178283691406,
      "learning_rate": 3.087329225823734e-07,
      "loss": 0.3922,
      "step": 1850
    },
    {
      "epoch": 1.830843373493976,
      "grad_norm": 43.89033889770508,
      "learning_rate": 3.020358960621484e-07,
      "loss": 0.4299,
      "step": 1900
    },
    {
      "epoch": 1.8790361445783132,
      "grad_norm": 70.07054138183594,
      "learning_rate": 2.953388695419234e-07,
      "loss": 0.429,
      "step": 1950
    },
    {
      "epoch": 1.9272289156626505,
      "grad_norm": 40.76694107055664,
      "learning_rate": 2.886418430216984e-07,
      "loss": 0.3623,
      "step": 2000
    },
    {
      "epoch": 1.975421686746988,
      "grad_norm": 58.233970642089844,
      "learning_rate": 2.8194481650147336e-07,
      "loss": 0.3876,
      "step": 2050
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.5154462456703186,
      "eval_runtime": 68.8316,
      "eval_samples_per_second": 45.197,
      "eval_steps_per_second": 5.651,
      "step": 2076
    },
    {
      "epoch": 2.023132530120482,
      "grad_norm": 90.69989013671875,
      "learning_rate": 2.752477899812483e-07,
      "loss": 0.3118,
      "step": 2100
    },
    {
      "epoch": 2.071325301204819,
      "grad_norm": 32.04137420654297,
      "learning_rate": 2.6855076346102327e-07,
      "loss": 0.2213,
      "step": 2150
    },
    {
      "epoch": 2.1195180722891567,
      "grad_norm": 76.33811950683594,
      "learning_rate": 2.6185373694079826e-07,
      "loss": 0.274,
      "step": 2200
    },
    {
      "epoch": 2.167710843373494,
      "grad_norm": 101.23194122314453,
      "learning_rate": 2.5515671042057324e-07,
      "loss": 0.2429,
      "step": 2250
    },
    {
      "epoch": 2.2159036144578312,
      "grad_norm": 44.051597595214844,
      "learning_rate": 2.484596839003482e-07,
      "loss": 0.2364,
      "step": 2300
    },
    {
      "epoch": 2.2640963855421687,
      "grad_norm": 36.38616943359375,
      "learning_rate": 2.417626573801232e-07,
      "loss": 0.2762,
      "step": 2350
    },
    {
      "epoch": 2.3122891566265062,
      "grad_norm": 82.28250885009766,
      "learning_rate": 2.350656308598982e-07,
      "loss": 0.262,
      "step": 2400
    },
    {
      "epoch": 2.3604819277108433,
      "grad_norm": 68.68080139160156,
      "learning_rate": 2.2836860433967317e-07,
      "loss": 0.2522,
      "step": 2450
    },
    {
      "epoch": 2.408674698795181,
      "grad_norm": 1.2003333568572998,
      "learning_rate": 2.2167157781944816e-07,
      "loss": 0.2609,
      "step": 2500
    },
    {
      "epoch": 2.4568674698795183,
      "grad_norm": 63.6321907043457,
      "learning_rate": 2.1497455129922311e-07,
      "loss": 0.2236,
      "step": 2550
    },
    {
      "epoch": 2.5050602409638554,
      "grad_norm": 84.84566497802734,
      "learning_rate": 2.0827752477899812e-07,
      "loss": 0.29,
      "step": 2600
    },
    {
      "epoch": 2.553253012048193,
      "grad_norm": 19.01542854309082,
      "learning_rate": 2.015804982587731e-07,
      "loss": 0.2843,
      "step": 2650
    },
    {
      "epoch": 2.6014457831325304,
      "grad_norm": 55.003379821777344,
      "learning_rate": 1.9488347173854806e-07,
      "loss": 0.2207,
      "step": 2700
    },
    {
      "epoch": 2.6496385542168674,
      "grad_norm": 62.63325119018555,
      "learning_rate": 1.8818644521832305e-07,
      "loss": 0.2948,
      "step": 2750
    },
    {
      "epoch": 2.697831325301205,
      "grad_norm": 38.040775299072266,
      "learning_rate": 1.8148941869809803e-07,
      "loss": 0.2269,
      "step": 2800
    },
    {
      "epoch": 2.746024096385542,
      "grad_norm": 12.635452270507812,
      "learning_rate": 1.7479239217787301e-07,
      "loss": 0.2416,
      "step": 2850
    },
    {
      "epoch": 2.7942168674698795,
      "grad_norm": 54.09561538696289,
      "learning_rate": 1.68095365657648e-07,
      "loss": 0.2692,
      "step": 2900
    },
    {
      "epoch": 2.842409638554217,
      "grad_norm": 87.50553894042969,
      "learning_rate": 1.6139833913742298e-07,
      "loss": 0.2877,
      "step": 2950
    },
    {
      "epoch": 2.890602409638554,
      "grad_norm": 98.76944732666016,
      "learning_rate": 1.5470131261719797e-07,
      "loss": 0.2417,
      "step": 3000
    },
    {
      "epoch": 2.9387951807228916,
      "grad_norm": 69.75730895996094,
      "learning_rate": 1.4800428609697292e-07,
      "loss": 0.3266,
      "step": 3050
    },
    {
      "epoch": 2.9869879518072286,
      "grad_norm": 95.7815170288086,
      "learning_rate": 1.413072595767479e-07,
      "loss": 0.2553,
      "step": 3100
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.6084380149841309,
      "eval_runtime": 68.8044,
      "eval_samples_per_second": 45.215,
      "eval_steps_per_second": 5.654,
      "step": 3114
    }
  ],
  "logging_steps": 50,
  "max_steps": 4148,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.368011185160192e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}