{
"best_global_step": 10,
"best_metric": 9.153268951196861e+22,
"best_model_checkpoint": "/gpfs/scratch/guoh/DNAFM/output/gencode_human_12.8k_12800/CKPT_DEBUG/checkpoint-10",
"epoch": 0.00035457220863028757,
"eval_steps": 10,
"global_step": 10,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"comp/rl_weight": 0.03,
"comp/strictness": 0.0,
"epoch": 0.00035457220863028757,
"grad_norm": 1504.030029296875,
"loss": 87.8092,
"loss_ce": 59.68216323852539,
"loss_region": 0.0935576930642128,
"loss_total": 59.77572250366211,
"lr": 2.20454076850486e-05,
"router/selected_tokens_s0": 793.03125,
"router/selected_tokens_s1": 148.84375,
"step": 10,
"tokens_trained": 0.00409568
},
{
"epoch": 0.00035457220863028757,
"eval_ppl": 9.153268951196861e+22,
"eval_runtime": 4.9797,
"step": 10,
"tokens_trained": 0.00409568
},
{
"epoch": 0.00035457220863028757,
"eval_F": 0.00177827022776477,
"eval_F_cds": 0.003658417655291659,
"eval_F_dig": 0.0012045211865510327,
"eval_F_exon": 0.0016746678575415876,
"eval_F_intron": 0.001447564894073858,
"eval_F_nig": 0.0013862277187481856,
"eval_F_promoter": 0.0033154658131890054,
"eval_F_utr": 0.0016332169397963454,
"eval_G": 0.011702613646642867,
"eval_G_cds": 0.023833423264492,
"eval_G_dig": 0.009547610734562596,
"eval_G_exon": 0.01037959719586171,
"eval_G_intron": 0.008864032313383523,
"eval_G_nig": 0.008525809138569675,
"eval_G_promoter": 0.02530388628896865,
"eval_G_utr": 0.010295956315471365,
"eval_avg_bp_per_token": 562.3442288953848,
"eval_bp_per_token/cds": 273.34221902017293,
"eval_bp_per_token/dig": 830.2054054054054,
"eval_bp_per_token/exon": 597.1333333333333,
"eval_bp_per_token/intron": 690.8153161864243,
"eval_bp_per_token/nig": 721.3821989528795,
"eval_bp_per_token/promoter": 301.61674296926094,
"eval_bp_per_token/utr": 612.2885304659499,
"eval_ppl_cds": 6.414658338854343e+21,
"eval_ppl_dig": 4.0669318513627916e+21,
"eval_ppl_exon": 2.4904150523692485e+23,
"eval_ppl_intron": 1.535523404049312e+23,
"eval_ppl_nig": 7.19246164907062e+23,
"eval_ppl_promoter": 1.0487349115377585e+21,
"eval_ppl_utr": 3.14597976396868e+23,
"step": 10,
"tokens_trained": 0.00409568
}
],
"logging_steps": 10,
"max_steps": 20,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}