{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.869565217391305,
"eval_steps": 500,
"global_step": 25,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"clip_ratio": 0.0,
"completion_length": 218.75,
"epoch": 0.17391304347826086,
"grad_norm": 2422.725830078125,
"learning_rate": 6.666666666666666e-07,
"loss": 0.0,
"reward": 0.25,
"rewards/accuracy_reward": 0.25,
"step": 1
},
{
"clip_ratio": 0.0,
"completion_length": 376.25,
"epoch": 0.34782608695652173,
"grad_norm": 321.9919738769531,
"learning_rate": 1.3333333333333332e-06,
"loss": -0.25,
"reward": 0.25,
"rewards/accuracy_reward": 0.25,
"step": 2
},
{
"clip_ratio": 0.0,
"completion_length": 347.125,
"epoch": 0.6956521739130435,
"grad_norm": 527.0032958984375,
"learning_rate": 1.9898214418809326e-06,
"loss": -0.125,
"reward": 0.125,
"rewards/accuracy_reward": 0.125,
"step": 4
},
{
"clip_ratio": 0.0,
"completion_length": 307.0,
"epoch": 1.1739130434782608,
"grad_norm": 0.0,
"learning_rate": 1.9096319953545185e-06,
"loss": 0.0625,
"reward": 0.0,
"rewards/accuracy_reward": 0.0,
"step": 6
},
{
"clip_ratio": 0.0,
"completion_length": 383.625,
"epoch": 1.5217391304347827,
"grad_norm": 8402.4208984375,
"learning_rate": 1.7557495743542582e-06,
"loss": 0.125,
"reward": 0.0,
"rewards/accuracy_reward": 0.0,
"step": 8
},
{
"clip_ratio": 0.0,
"completion_length": 270.125,
"epoch": 1.8695652173913042,
"grad_norm": 255.75051879882812,
"learning_rate": 1.5406408174555977e-06,
"loss": 0.0,
"reward": 0.125,
"rewards/accuracy_reward": 0.125,
"step": 10
},
{
"clip_ratio": 0.0,
"completion_length": 250.875,
"epoch": 2.3478260869565215,
"grad_norm": 0.0,
"learning_rate": 1.2817325568414297e-06,
"loss": 0.0,
"reward": 0.0,
"rewards/accuracy_reward": 0.0,
"step": 12
},
{
"clip_ratio": 0.0,
"completion_length": 270.0,
"epoch": 2.6956521739130435,
"grad_norm": 0.0,
"learning_rate": 1e-06,
"loss": 0.125,
"reward": 0.125,
"rewards/accuracy_reward": 0.125,
"step": 14
},
{
"clip_ratio": 0.0,
"completion_length": 157.25,
"epoch": 3.1739130434782608,
"grad_norm": 0.0,
"learning_rate": 7.182674431585702e-07,
"loss": 0.0,
"reward": 0.0,
"rewards/accuracy_reward": 0.0,
"step": 16
},
{
"clip_ratio": 0.0,
"completion_length": 232.75,
"epoch": 3.5217391304347827,
"grad_norm": 0.0,
"learning_rate": 4.5935918254440274e-07,
"loss": 0.0,
"reward": 0.125,
"rewards/accuracy_reward": 0.125,
"step": 18
},
{
"clip_ratio": 0.0,
"completion_length": 253.5,
"epoch": 3.869565217391304,
"grad_norm": 1907.75390625,
"learning_rate": 2.4425042564574185e-07,
"loss": -0.1875,
"reward": 0.125,
"rewards/accuracy_reward": 0.125,
"step": 20
},
{
"clip_ratio": 0.0,
"completion_length": 327.5,
"epoch": 4.3478260869565215,
"grad_norm": 6329.58837890625,
"learning_rate": 9.036800464548156e-08,
"loss": 0.0625,
"reward": 0.125,
"rewards/accuracy_reward": 0.125,
"step": 22
},
{
"clip_ratio": 0.0,
"completion_length": 242.5,
"epoch": 4.695652173913043,
"grad_norm": 0.0,
"learning_rate": 1.0178558119067315e-08,
"loss": -0.125,
"reward": 0.125,
"rewards/accuracy_reward": 0.125,
"step": 24
},
{
"epoch": 4.869565217391305,
"step": 25,
"total_flos": 0.0,
"train_loss": 0.0,
"train_runtime": 1.5902,
"train_samples_per_second": 72.319,
"train_steps_per_second": 15.722
}
],
"logging_steps": 2,
"max_steps": 25,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}