{
  "best_metric": 0.06150464341044426,
  "best_model_checkpoint": "/content/drive/MyDrive/dataset_for_research/ct_rate/data/ct_rate_jpn/model_output/sbintuitions_modernbert-ja-130m/checkpoint-2278",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 9112,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.21949078138718173,
      "grad_norm": 7.839651584625244,
      "learning_rate": 1.8902546093064093e-05,
      "loss": 0.2808,
      "step": 500
    },
    {
      "epoch": 0.43898156277436345,
      "grad_norm": 3.2797462940216064,
      "learning_rate": 1.7805092186128183e-05,
      "loss": 0.0933,
      "step": 1000
    },
    {
      "epoch": 0.6584723441615452,
      "grad_norm": 2.731287956237793,
      "learning_rate": 1.6707638279192274e-05,
      "loss": 0.0792,
      "step": 1500
    },
    {
      "epoch": 0.8779631255487269,
      "grad_norm": 3.9177889823913574,
      "learning_rate": 1.561018437225637e-05,
      "loss": 0.0695,
      "step": 2000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9815139986342796,
      "eval_f1": 0.9535225948862591,
      "eval_loss": 0.06150464341044426,
      "eval_precision": 0.9613625123639961,
      "eval_recall": 0.9458095122247901,
      "eval_runtime": 32.5019,
      "eval_samples_per_second": 140.176,
      "eval_steps_per_second": 17.537,
      "step": 2278
    },
    {
      "epoch": 1.0974539069359086,
      "grad_norm": 2.4581689834594727,
      "learning_rate": 1.4512730465320458e-05,
      "loss": 0.0588,
      "step": 2500
    },
    {
      "epoch": 1.3169446883230904,
      "grad_norm": 2.0007755756378174,
      "learning_rate": 1.3415276558384549e-05,
      "loss": 0.0554,
      "step": 3000
    },
    {
      "epoch": 1.536435469710272,
      "grad_norm": 0.9888765811920166,
      "learning_rate": 1.2317822651448641e-05,
      "loss": 0.0508,
      "step": 3500
    },
    {
      "epoch": 1.755926251097454,
      "grad_norm": 3.2408642768859863,
      "learning_rate": 1.122036874451273e-05,
      "loss": 0.0502,
      "step": 4000
    },
    {
      "epoch": 1.9754170324846356,
      "grad_norm": 2.510002613067627,
      "learning_rate": 1.0122914837576823e-05,
      "loss": 0.0501,
      "step": 4500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9830382401716906,
      "eval_f1": 0.9576650333262319,
      "eval_loss": 0.06173006817698479,
      "eval_precision": 0.9584526347852574,
      "eval_recall": 0.9568787252159104,
      "eval_runtime": 32.5239,
      "eval_samples_per_second": 140.082,
      "eval_steps_per_second": 17.526,
      "step": 4556
    },
    {
      "epoch": 2.194907813871817,
      "grad_norm": 5.879360675811768,
      "learning_rate": 9.025460930640914e-06,
      "loss": 0.0346,
      "step": 5000
    },
    {
      "epoch": 2.4143985952589992,
      "grad_norm": 0.7706089615821838,
      "learning_rate": 7.928007023705005e-06,
      "loss": 0.0349,
      "step": 5500
    },
    {
      "epoch": 2.633889376646181,
      "grad_norm": 4.287876129150391,
      "learning_rate": 6.830553116769097e-06,
      "loss": 0.0321,
      "step": 6000
    },
    {
      "epoch": 2.853380158033363,
      "grad_norm": 2.919949531555176,
      "learning_rate": 5.7330992098331876e-06,
      "loss": 0.0315,
      "step": 6500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9846478392351966,
      "eval_f1": 0.9614760870230409,
      "eval_loss": 0.06240718811750412,
      "eval_precision": 0.9674856826159246,
      "eval_recall": 0.9555406884807202,
      "eval_runtime": 32.6149,
      "eval_samples_per_second": 139.691,
      "eval_steps_per_second": 17.477,
      "step": 6834
    },
    {
      "epoch": 3.0728709394205445,
      "grad_norm": 0.055859215557575226,
      "learning_rate": 4.6356453028972785e-06,
      "loss": 0.0263,
      "step": 7000
    },
    {
      "epoch": 3.292361720807726,
      "grad_norm": 0.41953563690185547,
      "learning_rate": 3.53819139596137e-06,
      "loss": 0.0149,
      "step": 7500
    },
    {
      "epoch": 3.511852502194908,
      "grad_norm": 0.9124375581741333,
      "learning_rate": 2.440737489025461e-06,
      "loss": 0.0161,
      "step": 8000
    },
    {
      "epoch": 3.7313432835820897,
      "grad_norm": 1.055453896522522,
      "learning_rate": 1.3432835820895524e-06,
      "loss": 0.014,
      "step": 8500
    },
    {
      "epoch": 3.9508340649692713,
      "grad_norm": 5.611504077911377,
      "learning_rate": 2.458296751536436e-07,
      "loss": 0.013,
      "step": 9000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9850014632718759,
      "eval_f1": 0.9624152050357514,
      "eval_loss": 0.08579593896865845,
      "eval_precision": 0.967084254482928,
      "eval_recall": 0.9577910229899039,
      "eval_runtime": 32.6214,
      "eval_samples_per_second": 139.663,
      "eval_steps_per_second": 17.473,
      "step": 9112
    }
  ],
  "logging_steps": 500,
  "max_steps": 9112,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3278248486495112e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}