{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4851890504360199,
"min": 0.4738651216030121,
"max": 1.4200611114501953,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14602.25,
"min": 14200.7900390625,
"max": 43078.97265625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989930.0,
"min": 29971.0,
"max": 989930.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989930.0,
"min": 29971.0,
"max": 989930.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3410188555717468,
"min": -0.0897306278347969,
"max": 0.35677194595336914,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 90.02897644042969,
"min": -21.804542541503906,
"max": 94.18778991699219,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0012974436394870281,
"min": -0.0033821710385382175,
"max": 0.46434852480888367,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.3425251245498657,
"min": -0.8793644905090332,
"max": 110.51494598388672,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0677137653928228,
"min": 0.06656944238610438,
"max": 0.07526195281007816,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9479927154995191,
"min": 0.605810363217116,
"max": 1.0536673393410942,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012445013703698006,
"min": 0.0006729796774595343,
"max": 0.012445013703698006,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17423019185177208,
"min": 0.008748735806973947,
"max": 0.17423019185177208,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.3698546862714305e-06,
"min": 7.3698546862714305e-06,
"max": 0.0002949325350224889,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010317796560780002,
"min": 0.00010317796560780002,
"max": 0.0037575910474697003,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245658571428572,
"min": 0.10245658571428572,
"max": 0.19831084444444447,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4343922,
"min": 1.4343922,
"max": 2.6525303,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002554129128571429,
"min": 0.0002554129128571429,
"max": 0.00983125336,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003575780780000001,
"min": 0.003575780780000001,
"max": 0.12526777697,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010136074386537075,
"min": 0.010136074386537075,
"max": 0.49468284845352173,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1419050395488739,
"min": 0.1419050395488739,
"max": 4.452145576477051,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 503.44262295081967,
"min": 470.31666666666666,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30710.0,
"min": 16769.0,
"max": 33171.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2669704721355048,
"min": -0.9998581159499383,
"max": 1.3629633077730736,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 77.28519880026579,
"min": -30.99560159444809,
"max": 81.77779846638441,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2669704721355048,
"min": -0.9998581159499383,
"max": 1.3629633077730736,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 77.28519880026579,
"min": -30.99560159444809,
"max": 81.77779846638441,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05247215958184288,
"min": 0.05161820470336049,
"max": 10.493693926268154,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2008017344924156,
"min": 3.097092282201629,
"max": 188.88649067282677,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1738602626",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1738605010"
},
"total": 2384.071341423,
"count": 1,
"self": 0.4764068380000026,
"children": {
"run_training.setup": {
"total": 0.020855919000041467,
"count": 1,
"self": 0.020855919000041467
},
"TrainerController.start_learning": {
"total": 2383.574078666,
"count": 1,
"self": 1.4139266411498284,
"children": {
"TrainerController._reset_env": {
"total": 2.3541655149997496,
"count": 1,
"self": 2.3541655149997496
},
"TrainerController.advance": {
"total": 2379.70231460785,
"count": 63487,
"self": 1.3679987978184727,
"children": {
"env_step": {
"total": 1628.945394239015,
"count": 63487,
"self": 1469.591929730047,
"children": {
"SubprocessEnvManager._take_step": {
"total": 158.5246442699122,
"count": 63487,
"self": 4.8476766159878935,
"children": {
"TorchPolicy.evaluate": {
"total": 153.6769676539243,
"count": 62539,
"self": 153.6769676539243
}
}
},
"workers": {
"total": 0.8288202390558581,
"count": 63487,
"self": 0.0,
"children": {
"worker_root": {
"total": 2378.236928909006,
"count": 63487,
"is_parallel": true,
"self": 1027.2644158099415,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021465299996634712,
"count": 1,
"is_parallel": true,
"self": 0.0007103909988472878,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014361390008161834,
"count": 8,
"is_parallel": true,
"self": 0.0014361390008161834
}
}
},
"UnityEnvironment.step": {
"total": 0.05045716199992967,
"count": 1,
"is_parallel": true,
"self": 0.0005671649996656924,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006301020002865698,
"count": 1,
"is_parallel": true,
"self": 0.0006301020002865698
},
"communicator.exchange": {
"total": 0.04743878499994025,
"count": 1,
"is_parallel": true,
"self": 0.04743878499994025
},
"steps_from_proto": {
"total": 0.0018211100000371516,
"count": 1,
"is_parallel": true,
"self": 0.0003976779998993152,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014234320001378364,
"count": 8,
"is_parallel": true,
"self": 0.0014234320001378364
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1350.9725130990646,
"count": 63486,
"is_parallel": true,
"self": 33.379725546039936,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.62005600610928,
"count": 63486,
"is_parallel": true,
"self": 23.62005600610928
},
"communicator.exchange": {
"total": 1194.08883758102,
"count": 63486,
"is_parallel": true,
"self": 1194.08883758102
},
"steps_from_proto": {
"total": 99.88389396589537,
"count": 63486,
"is_parallel": true,
"self": 19.652364975014734,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.23152899088063,
"count": 507888,
"is_parallel": true,
"self": 80.23152899088063
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 749.3889215710165,
"count": 63487,
"self": 2.753512451999086,
"children": {
"process_trajectory": {
"total": 138.936428137019,
"count": 63487,
"self": 138.6536137700191,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2828143669999008,
"count": 2,
"self": 0.2828143669999008
}
}
},
"_update_policy": {
"total": 607.6989809819984,
"count": 462,
"self": 339.369786689937,
"children": {
"TorchPPOOptimizer.update": {
"total": 268.3291942920614,
"count": 22752,
"self": 268.3291942920614
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.490004231338389e-07,
"count": 1,
"self": 9.490004231338389e-07
},
"TrainerController._save_models": {
"total": 0.1036709529998916,
"count": 1,
"self": 0.0018145940002796124,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10185635899961198,
"count": 1,
"self": 0.10185635899961198
}
}
}
}
}
}
}