{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.14759185910224915,
"min": 0.14025941491127014,
"max": 0.33215630054473877,
"count": 50
},
"Pyramids.Policy.Entropy.sum": {
"value": 4484.43115234375,
"min": 4207.7822265625,
"max": 10171.9541015625,
"count": 50
},
"Pyramids.Step.mean": {
"value": 2999999.0,
"min": 1529929.0,
"max": 2999999.0,
"count": 50
},
"Pyramids.Step.sum": {
"value": 2999999.0,
"min": 1529929.0,
"max": 2999999.0,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7630169987678528,
"min": 0.5922383666038513,
"max": 0.7677894830703735,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 225.0900115966797,
"min": 165.23451232910156,
"max": 228.03347778320312,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.015491118654608727,
"min": 0.008117031306028366,
"max": 0.0288896132260561,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.56988000869751,
"min": 2.370173215866089,
"max": 8.233539581298828,
"count": 50
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 242.82203389830508,
"min": 222.50746268656715,
"max": 325.75,
"count": 50
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28653.0,
"min": 26537.0,
"max": 31592.0,
"count": 50
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7571779478151919,
"min": 1.586324718850915,
"max": 1.777492522637346,
"count": 50
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 207.34699784219265,
"min": 145.8119983226061,
"max": 238.18399803340435,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7571779478151919,
"min": 1.586324718850915,
"max": 1.777492522637346,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 207.34699784219265,
"min": 145.8119983226061,
"max": 238.18399803340435,
"count": 50
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.028435185211646504,
"min": 0.026604726313148128,
"max": 0.06942069130084362,
"count": 50
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.3553518549742876,
"min": 3.3553518549742876,
"max": 6.733807056181831,
"count": 50
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.019560502592163784,
"min": 0.019560502592163784,
"max": 0.0276279745134525,
"count": 50
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.05868150777649135,
"min": 0.04262920394539833,
"max": 0.08060212047537789,
"count": 50
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01643072345604499,
"min": 0.014319621746738752,
"max": 0.017897415434320767,
"count": 50
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.04929217036813498,
"min": 0.030501069389283656,
"max": 0.0536922463029623,
"count": 50
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.6612994462666625e-06,
"min": 1.6612994462666625e-06,
"max": 0.00014845130051624998,
"count": 50
},
"Pyramids.Policy.LearningRate.sum": {
"value": 4.983898338799987e-06,
"min": 4.983898338799987e-06,
"max": 0.0004376169541277333,
"count": 50
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10055373333333334,
"min": 0.10055373333333334,
"max": 0.14948375000000003,
"count": 50
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.3016612,
"min": 0.2090047333333333,
"max": 0.4458722666666667,
"count": 50
},
"Pyramids.Policy.Beta.mean": {
"value": 6.531795999999988e-05,
"min": 6.531795999999988e-05,
"max": 0.004953426625,
"count": 50
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0001959538799999996,
"min": 0.0001959538799999996,
"max": 0.014602639439999999,
"count": 50
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011611025780439377,
"min": 0.01159887108951807,
"max": 0.02117346040904522,
"count": 50
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.03483307734131813,
"min": 0.02372013032436371,
"max": 0.06352037936449051,
"count": 50
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1746508706",
"python_version": "3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:40:32) [GCC 12.3.0]",
"command_line_arguments": "/home/txshi/miniconda3/envs/hf-rl-ch5/bin/mlagents-learn ./ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1746509653"
},
"total": 946.4430587530005,
"count": 1,
"self": 0.1665285330018378,
"children": {
"run_training.setup": {
"total": 0.015372962998299045,
"count": 1,
"self": 0.015372962998299045
},
"TrainerController.start_learning": {
"total": 946.2611572570004,
"count": 1,
"self": 0.7353149860136909,
"children": {
"TrainerController._reset_env": {
"total": 1.3560721989997546,
"count": 1,
"self": 1.3560721989997546
},
"TrainerController.advance": {
"total": 944.1236960439855,
"count": 98280,
"self": 0.6919071292486478,
"children": {
"env_step": {
"total": 628.7653076611532,
"count": 98280,
"self": 501.679360544118,
"children": {
"SubprocessEnvManager._take_step": {
"total": 126.59082360999855,
"count": 98280,
"self": 2.55443194534746,
"children": {
"TorchPolicy.evaluate": {
"total": 124.03639166465109,
"count": 93817,
"self": 124.03639166465109
}
}
},
"workers": {
"total": 0.4951235070366238,
"count": 98280,
"self": 0.0,
"children": {
"worker_root": {
"total": 945.3021435918654,
"count": 98280,
"is_parallel": true,
"self": 489.8738490467058,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0006208550003066193,
"count": 1,
"is_parallel": true,
"self": 0.0001753210017341189,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004455339985725004,
"count": 8,
"is_parallel": true,
"self": 0.0004455339985725004
}
}
},
"UnityEnvironment.step": {
"total": 0.012712784000541433,
"count": 1,
"is_parallel": true,
"self": 0.00012432600124157034,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00013519099957193248,
"count": 1,
"is_parallel": true,
"self": 0.00013519099957193248
},
"communicator.exchange": {
"total": 0.012097856999389478,
"count": 1,
"is_parallel": true,
"self": 0.012097856999389478
},
"steps_from_proto": {
"total": 0.00035541000033845194,
"count": 1,
"is_parallel": true,
"self": 8.841799717629328e-05,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00026699200316215865,
"count": 8,
"is_parallel": true,
"self": 0.00026699200316215865
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 455.4282945451596,
"count": 98279,
"is_parallel": true,
"self": 10.6707104262332,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.716008393870652,
"count": 98279,
"is_parallel": true,
"self": 6.716008393870652
},
"communicator.exchange": {
"total": 409.69189717502195,
"count": 98279,
"is_parallel": true,
"self": 409.69189717502195
},
"steps_from_proto": {
"total": 28.349678550033786,
"count": 98279,
"is_parallel": true,
"self": 6.308964319758161,
"children": {
"_process_rank_one_or_two_observation": {
"total": 22.040714230275626,
"count": 786232,
"is_parallel": true,
"self": 22.040714230275626
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 314.6664812535837,
"count": 98280,
"self": 1.5202239386580914,
"children": {
"process_trajectory": {
"total": 79.52096247990994,
"count": 98280,
"self": 79.37046390090836,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15049857900157804,
"count": 3,
"self": 0.15049857900157804
}
}
},
"_update_policy": {
"total": 233.62529483501567,
"count": 145,
"self": 185.6884477689655,
"children": {
"TorchPPOOptimizer.update": {
"total": 47.93684706605018,
"count": 7250,
"self": 47.93684706605018
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.420016921358183e-07,
"count": 1,
"self": 4.420016921358183e-07
},
"TrainerController._save_models": {
"total": 0.04607358599969302,
"count": 1,
"self": 0.0011363630001142155,
"children": {
"RLTrainer._checkpoint": {
"total": 0.04493722299957881,
"count": 1,
"self": 0.04493722299957881
}
}
}
}
}
}
}