{
  "best_global_step": 20,
  "best_metric": 1207329.5664974535,
  "best_model_checkpoint": "/network/scratch/j/jianan.zhao/DNAFM/output/gencode_human_12.8k_12800/CKPT_DEBUG/checkpoint-20",
  "epoch": 0.0028368794326241137,
  "eval_steps": 10,
  "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "comp/rl_weight": 0.03,
      "comp/strictness": 0.0,
      "epoch": 0.0014184397163120568,
      "grad_norm": 917.1181030273438,
      "loss": 144.7748,
      "loss_ce": 54.116615295410156,
      "loss_region": 0.10434026271104813,
      "loss_total": 54.22095489501953,
      "lr": 2.20454076850486e-05,
      "router/selected_tokens_s0": 22.84375,
      "router/selected_tokens_s1": 1.0,
      "step": 10,
      "tokens_trained": 0.01638272
    },
    {
      "epoch": 0.0014184397163120568,
      "eval_ppl": 2.1194747907582294e+17,
      "eval_runtime": 1.7718,
      "step": 10,
      "tokens_trained": 0.01638272
    },
    {
      "epoch": 0.0014184397163120568,
      "eval_F": 7.813110399249941e-05,
      "eval_F_cds": 2.439649178448139e-05,
      "eval_F_dig": 7.813110399249941e-05,
      "eval_F_exon": 6.169412055031155e-05,
      "eval_F_intron": 8.631567698207511e-05,
      "eval_F_nig": 8.867784676802712e-05,
      "eval_F_promoter": 2.898993785611503e-05,
      "eval_F_utr": 0.00013418766144453016,
      "eval_G": 0.00022184538364639257,
      "eval_G_cds": 0.0001901449227851035,
      "eval_G_dig": 0.00015412581061020393,
      "eval_G_exon": 0.000246234248719847,
      "eval_G_intron": 0.00021249317768522492,
      "eval_G_nig": 0.00019619516090846272,
      "eval_G_promoter": 0.00030238687665044985,
      "eval_G_utr": 0.0002784000847059613,
      "eval_avg_bp_per_token": 12799.0,
      "eval_bp_per_token/cds": 40989.5,
      "eval_bp_per_token/dig": 12799.0,
      "eval_bp_per_token/exon": 16209.0,
      "eval_bp_per_token/intron": 11585.380952380952,
      "eval_bp_per_token/nig": 11276.77358490566,
      "eval_bp_per_token/promoter": 34494.72727272727,
      "eval_bp_per_token/utr": 7452.25,
      "eval_ppl_cds": 3.730425344687072e+19,
      "eval_ppl_dig": 4.017993685449341e+17,
      "eval_ppl_exon": 6.054858771331785e+16,
      "eval_ppl_intron": 1.2271560232082048e+17,
      "eval_ppl_nig": 3.849790568976093e+16,
      "eval_ppl_promoter": 9.761527176325515e+19,
      "eval_ppl_utr": 1.747612158170368e+17,
      "step": 10,
      "tokens_trained": 0.01638272
    },
    {
      "comp/rl_weight": 0.03,
      "comp/strictness": 0.0,
      "epoch": 0.0028368794326241137,
      "grad_norm": 518.6296997070312,
      "loss": 24.6671,
      "loss_ce": 15.385003089904785,
      "loss_region": 0.09944470971822739,
      "loss_total": 15.484447479248047,
      "lr": 4.654030511288038e-05,
      "router/selected_tokens_s0": 1.0,
      "router/selected_tokens_s1": 1.0,
      "step": 20,
      "tokens_trained": 0.03276544
    },
    {
      "epoch": 0.0028368794326241137,
      "eval_ppl": 1207329.5664974535,
      "eval_runtime": 1.7227,
      "step": 20,
      "tokens_trained": 0.03276544
    },
    {
      "epoch": 0.0028368794326241137,
      "eval_F": 7.813110399249941e-05,
      "eval_F_cds": 2.439649178448139e-05,
      "eval_F_dig": 7.813110399249941e-05,
      "eval_F_exon": 6.169412055031155e-05,
      "eval_F_intron": 8.631567698207511e-05,
      "eval_F_nig": 8.867784676802712e-05,
      "eval_F_promoter": 2.898993785611503e-05,
      "eval_F_utr": 0.00013418766144453016,
      "eval_G": 7.813110399249941e-05,
      "eval_G_cds": 2.439649178448139e-05,
      "eval_G_dig": 7.813110399249941e-05,
      "eval_G_exon": 6.169412055031155e-05,
      "eval_G_intron": 8.631567698207511e-05,
      "eval_G_nig": 8.867784676802712e-05,
      "eval_G_promoter": 2.898993785611503e-05,
      "eval_G_utr": 0.00013418766144453016,
      "eval_avg_bp_per_token": 12799.0,
      "eval_bp_per_token/cds": 40989.5,
      "eval_bp_per_token/dig": 12799.0,
      "eval_bp_per_token/exon": 16209.0,
      "eval_bp_per_token/intron": 11585.380952380952,
      "eval_bp_per_token/nig": 11276.77358490566,
      "eval_bp_per_token/promoter": 34494.72727272727,
      "eval_bp_per_token/utr": 7452.25,
      "eval_ppl_cds": 4840595.678625624,
      "eval_ppl_dig": 1044.7725056220102,
      "eval_ppl_exon": 2136300.4799613026,
      "eval_ppl_intron": 763217.2143647178,
      "eval_ppl_nig": 1411875.7684863594,
      "eval_ppl_promoter": 4137739.958306599,
      "eval_ppl_utr": 2728184.3584358147,
      "step": 20,
      "tokens_trained": 0.03276544
    }
  ],
  "logging_steps": 10,
  "max_steps": 20,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}