{
    "name": "root",
    "gauges": {
        "Pyramids.Policy.Entropy.mean": {
            "value": 0.8140870332717896,
            "min": 0.8140870332717896,
            "max": 1.3965567350387573,
            "count": 16
        },
        "Pyramids.Policy.Entropy.sum": {
            "value": 24500.763671875,
            "min": 24500.763671875,
            "max": 42365.9453125,
            "count": 16
        },
        "Pyramids.Step.mean": {
            "value": 479948.0,
            "min": 29952.0,
            "max": 479948.0,
            "count": 16
        },
        "Pyramids.Step.sum": {
            "value": 479948.0,
            "min": 29952.0,
            "max": 479948.0,
            "count": 16
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
            "value": 0.09889139980077744,
            "min": -0.0866457149386406,
            "max": 0.09889139980077744,
            "count": 16
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
            "value": 24.525066375732422,
            "min": -20.794971466064453,
            "max": 24.525066375732422,
            "count": 16
        },
        "Pyramids.Policy.RndValueEstimate.mean": {
            "value": 0.013121883384883404,
            "min": 0.013121883384883404,
            "max": 0.5604943633079529,
            "count": 16
        },
        "Pyramids.Policy.RndValueEstimate.sum": {
            "value": 3.2542271614074707,
            "min": 3.2542271614074707,
            "max": 132.837158203125,
            "count": 16
        },
        "Pyramids.Losses.PolicyLoss.mean": {
            "value": 0.07320434617189886,
            "min": 0.06461985435301723,
            "max": 0.0736342190077105,
            "count": 16
        },
        "Pyramids.Losses.PolicyLoss.sum": {
            "value": 1.024860846406584,
            "min": 0.5154395330539735,
            "max": 1.024860846406584,
            "count": 16
        },
        "Pyramids.Losses.ValueLoss.mean": {
            "value": 0.008114452609998891,
            "min": 0.0007525435187473575,
            "max": 0.009662751753405488,
            "count": 16
        },
        "Pyramids.Losses.ValueLoss.sum": {
            "value": 0.11360233653998449,
            "min": 0.00903052222496829,
            "max": 0.11360233653998449,
            "count": 16
        },
        "Pyramids.Policy.LearningRate.mean": {
            "value": 2.0494550311371424e-05,
            "min": 2.0494550311371424e-05,
            "max": 0.00029030126037577137,
            "count": 16
        },
        "Pyramids.Policy.LearningRate.sum": {
            "value": 0.00028692370435919994,
            "min": 0.00028692370435919994,
            "max": 0.0027993037668988,
            "count": 16
        },
        "Pyramids.Policy.Epsilon.mean": {
            "value": 0.10683148571428572,
            "min": 0.10683148571428572,
            "max": 0.19676708571428575,
            "count": 16
        },
        "Pyramids.Policy.Epsilon.sum": {
            "value": 1.4956408,
            "min": 1.3773696000000002,
            "max": 2.2553123999999998,
            "count": 16
        },
        "Pyramids.Policy.Beta.mean": {
            "value": 0.0006924654228571428,
            "min": 0.0006924654228571428,
            "max": 0.00967703186285714,
            "count": 16
        },
        "Pyramids.Policy.Beta.sum": {
            "value": 0.009694515919999999,
            "min": 0.009694515919999999,
            "max": 0.09332680988000003,
            "count": 16
        },
        "Pyramids.Losses.RNDLoss.mean": {
            "value": 0.021623099222779274,
            "min": 0.021623099222779274,
            "max": 0.5177385210990906,
            "count": 16
        },
        "Pyramids.Losses.RNDLoss.sum": {
            "value": 0.30272337794303894,
            "min": 0.30272337794303894,
            "max": 3.6241695880889893,
            "count": 16
        },
        "Pyramids.Environment.EpisodeLength.mean": {
            "value": 776.3,
            "min": 775.921052631579,
            "max": 999.0,
            "count": 16
        },
        "Pyramids.Environment.EpisodeLength.sum": {
            "value": 31052.0,
            "min": 15984.0,
            "max": 33583.0,
            "count": 16
        },
        "Pyramids.Environment.CumulativeReward.mean": {
            "value": 0.4234449589625001,
            "min": -1.0000000521540642,
            "max": 0.4234449589625001,
            "count": 16
        },
        "Pyramids.Environment.CumulativeReward.sum": {
            "value": 16.937798358500004,
            "min": -29.947601668536663,
            "max": 16.937798358500004,
            "count": 16
        },
        "Pyramids.Policy.ExtrinsicReward.mean": {
            "value": 0.4234449589625001,
            "min": -1.0000000521540642,
            "max": 0.4234449589625001,
            "count": 16
        },
        "Pyramids.Policy.ExtrinsicReward.sum": {
            "value": 16.937798358500004,
            "min": -29.947601668536663,
            "max": 16.937798358500004,
            "count": 16
        },
        "Pyramids.Policy.RndReward.mean": {
            "value": 0.17633211204665714,
            "min": 0.17633211204665714,
            "max": 10.779729153960943,
            "count": 16
        },
        "Pyramids.Policy.RndReward.sum": {
            "value": 7.053284481866285,
            "min": 6.714518571272492,
            "max": 172.4756664633751,
            "count": 16
        },
        "Pyramids.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 16
        },
        "Pyramids.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 16
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1691073002",
        "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn --force ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
        "mlagents_version": "0.31.0.dev0",
        "mlagents_envs_version": "0.31.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "1.11.0+cu102",
        "numpy_version": "1.21.2",
        "end_time_seconds": "1691073874"
    },
    "total": 871.4434206169999,
    "count": 1,
    "self": 0.32518954499937536,
    "children": {
        "run_training.setup": {
            "total": 0.04896397800075647,
            "count": 1,
            "self": 0.04896397800075647
        },
        "TrainerController.start_learning": {
            "total": 871.0692670939998,
            "count": 1,
            "self": 0.7495274820303166,
            "children": {
                "TrainerController._reset_env": {
                    "total": 4.376205507999657,
                    "count": 1,
                    "self": 4.376205507999657
                },
                "TrainerController.advance": {
                    "total": 865.8476183079702,
                    "count": 31605,
                    "self": 0.7288256318506683,
                    "children": {
                        "env_step": {
                            "total": 568.7123238700296,
                            "count": 31605,
                            "self": 512.6138779881412,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 55.648934751046,
                                    "count": 31605,
                                    "self": 2.361382144156778,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 53.28755260688922,
                                            "count": 31316,
                                            "self": 53.28755260688922
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.44951113084243843,
                                    "count": 31605,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 869.6091956549708,
                                            "count": 31605,
                                            "is_parallel": true,
                                            "self": 408.4841973718785,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.001994140000533662,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0005668380008501117,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.00142730199968355,
                                                                    "count": 8,
                                                                    "is_parallel": true,
                                                                    "self": 0.00142730199968355
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.0463572299995576,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0003554879995135707,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.000373339999896416,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.000373339999896416
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.0444114140000238,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0444114140000238
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0012169880001238198,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0003044640006919508,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.000912523999431869,
                                                                            "count": 8,
                                                                            "is_parallel": true,
                                                                            "self": 0.000912523999431869
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 461.1249982830923,
                                                    "count": 31604,
                                                    "is_parallel": true,
                                                    "self": 12.554558846159125,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 8.828483802870323,
                                                            "count": 31604,
                                                            "is_parallel": true,
                                                            "self": 8.828483802870323
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 402.5129727841095,
                                                            "count": 31604,
                                                            "is_parallel": true,
                                                            "self": 402.5129727841095
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 37.228982849953354,
                                                            "count": 31604,
                                                            "is_parallel": true,
                                                            "self": 7.719658702796551,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 29.509324147156804,
                                                                    "count": 252832,
                                                                    "is_parallel": true,
                                                                    "self": 29.509324147156804
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        },
                        "trainer_advance": {
                            "total": 296.40646880608983,
                            "count": 31605,
                            "self": 1.2728012682000553,
                            "children": {
                                "process_trajectory": {
                                    "total": 49.30109030289077,
                                    "count": 31605,
                                    "self": 49.18668687889112,
                                    "children": {
                                        "RLTrainer._checkpoint": {
                                            "total": 0.1144034239996472,
                                            "count": 1,
                                            "self": 0.1144034239996472
                                        }
                                    }
                                },
                                "_update_policy": {
                                    "total": 245.832577234999,
                                    "count": 212,
                                    "self": 155.21310404001815,
                                    "children": {
                                        "TorchPPOOptimizer.update": {
                                            "total": 90.61947319498086,
                                            "count": 11424,
                                            "self": 90.61947319498086
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 9.609993867343292e-07,
                    "count": 1,
                    "self": 9.609993867343292e-07
                },
                "TrainerController._save_models": {
                    "total": 0.09591483500025788,
                    "count": 1,
                    "self": 0.0013531670001611928,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.09456166800009669,
                            "count": 1,
                            "self": 0.09456166800009669
                        }
                    }
                }
            }
        }
    }
}