{
"best_global_step": 10,
"best_metric": 2.1194747907582294e+17,
"best_model_checkpoint": "/network/scratch/j/jianan.zhao/DNAFM/output/gencode_human_12.8k_12800/CKPT_DEBUG/checkpoint-10",
"epoch": 0.0014184397163120568,
"eval_steps": 10,
"global_step": 10,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"comp/rl_weight": 0.03,
"comp/strictness": 0.0,
"epoch": 0.0014184397163120568,
"grad_norm": 917.1181030273438,
"loss": 144.7748,
"loss_ce": 54.116615295410156,
"loss_region": 0.10434026271104813,
"loss_total": 54.22095489501953,
"lr": 2.20454076850486e-05,
"router/selected_tokens_s0": 22.84375,
"router/selected_tokens_s1": 1.0,
"step": 10,
"tokens_trained": 0.01638272
},
{
"epoch": 0.0014184397163120568,
"eval_ppl": 2.1194747907582294e+17,
"eval_runtime": 1.7718,
"step": 10,
"tokens_trained": 0.01638272
},
{
"epoch": 0.0014184397163120568,
"eval_F": 7.813110399249941e-05,
"eval_F_cds": 2.439649178448139e-05,
"eval_F_dig": 7.813110399249941e-05,
"eval_F_exon": 6.169412055031155e-05,
"eval_F_intron": 8.631567698207511e-05,
"eval_F_nig": 8.867784676802712e-05,
"eval_F_promoter": 2.898993785611503e-05,
"eval_F_utr": 0.00013418766144453016,
"eval_G": 0.00022184538364639257,
"eval_G_cds": 0.0001901449227851035,
"eval_G_dig": 0.00015412581061020393,
"eval_G_exon": 0.000246234248719847,
"eval_G_intron": 0.00021249317768522492,
"eval_G_nig": 0.00019619516090846272,
"eval_G_promoter": 0.00030238687665044985,
"eval_G_utr": 0.0002784000847059613,
"eval_avg_bp_per_token": 12799.0,
"eval_bp_per_token/cds": 40989.5,
"eval_bp_per_token/dig": 12799.0,
"eval_bp_per_token/exon": 16209.0,
"eval_bp_per_token/intron": 11585.380952380952,
"eval_bp_per_token/nig": 11276.77358490566,
"eval_bp_per_token/promoter": 34494.72727272727,
"eval_bp_per_token/utr": 7452.25,
"eval_ppl_cds": 3.730425344687072e+19,
"eval_ppl_dig": 4.017993685449341e+17,
"eval_ppl_exon": 6.054858771331785e+16,
"eval_ppl_intron": 1.2271560232082048e+17,
"eval_ppl_nig": 3.849790568976093e+16,
"eval_ppl_promoter": 9.761527176325515e+19,
"eval_ppl_utr": 1.747612158170368e+17,
"step": 10,
"tokens_trained": 0.01638272
}
],
"logging_steps": 10,
"max_steps": 20,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}