{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.73075932264328,
"min": 0.6708958148956299,
"max": 1.4215344190597534,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 21887.703125,
"min": 20073.203125,
"max": 43123.66796875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989988.0,
"min": 29952.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989988.0,
"min": 29952.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.09480485320091248,
"min": -0.10160810500383377,
"max": 0.09480485320091248,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 23.701213836669922,
"min": -24.385944366455078,
"max": 23.701213836669922,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.049889273941516876,
"min": 0.013466407544910908,
"max": 0.4943709969520569,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 12.472318649291992,
"min": 3.3127362728118896,
"max": 117.16592407226562,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07157881780066029,
"min": 0.0639325553748972,
"max": 0.07277367609381515,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.002103449209244,
"min": 0.4974685737832407,
"max": 1.0624960574090903,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.009709257556697654,
"min": 0.00011170198267120196,
"max": 0.015417325796398833,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.13592960579376714,
"min": 0.0015638277573968274,
"max": 0.13592960579376714,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.280154716171427e-06,
"min": 7.280154716171427e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010192216602639998,
"min": 0.00010192216602639998,
"max": 0.0035075879308041,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242668571428569,
"min": 0.10242668571428569,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4339735999999996,
"min": 1.3886848,
"max": 2.5691959000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002524259028571428,
"min": 0.0002524259028571428,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035339626399999995,
"min": 0.0035339626399999995,
"max": 0.11694267040999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013450860045850277,
"min": 0.013450860045850277,
"max": 0.598111629486084,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18831203877925873,
"min": 0.18831203877925873,
"max": 4.186781406402588,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 713.8,
"min": 713.8,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28552.0,
"min": 15984.0,
"max": 32607.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.4859849657863379,
"min": -1.0000000521540642,
"max": 0.4859849657863379,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 19.439398631453514,
"min": -30.99680159986019,
"max": 19.439398631453514,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.4859849657863379,
"min": -1.0000000521540642,
"max": 0.4859849657863379,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 19.439398631453514,
"min": -30.99680159986019,
"max": 19.439398631453514,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0995938936874154,
"min": 0.0995938936874154,
"max": 12.716632867231965,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.983755747496616,
"min": 3.983755747496616,
"max": 203.46612587571144,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1697458797",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1697460816"
},
"total": 2019.0856081000002,
"count": 1,
"self": 0.6330330089999734,
"children": {
"run_training.setup": {
"total": 0.04492526500001759,
"count": 1,
"self": 0.04492526500001759
},
"TrainerController.start_learning": {
"total": 2018.4076498260001,
"count": 1,
"self": 1.290525824985707,
"children": {
"TrainerController._reset_env": {
"total": 3.7188611079999987,
"count": 1,
"self": 3.7188611079999987
},
"TrainerController.advance": {
"total": 2013.316640359014,
"count": 63171,
"self": 1.39551492294936,
"children": {
"env_step": {
"total": 1398.5654953990208,
"count": 63171,
"self": 1275.9197896580058,
"children": {
"SubprocessEnvManager._take_step": {
"total": 121.87336608902484,
"count": 63171,
"self": 4.61206085202781,
"children": {
"TorchPolicy.evaluate": {
"total": 117.26130523699703,
"count": 62561,
"self": 117.26130523699703
}
}
},
"workers": {
"total": 0.7723396519902508,
"count": 63171,
"self": 0.0,
"children": {
"worker_root": {
"total": 2014.0000522339642,
"count": 63171,
"is_parallel": true,
"self": 852.3451737910036,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019994669999618964,
"count": 1,
"is_parallel": true,
"self": 0.0006438130000105957,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013556539999513006,
"count": 8,
"is_parallel": true,
"self": 0.0013556539999513006
}
}
},
"UnityEnvironment.step": {
"total": 0.09706946599999355,
"count": 1,
"is_parallel": true,
"self": 0.0006635429999732878,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046991900001103204,
"count": 1,
"is_parallel": true,
"self": 0.00046991900001103204
},
"communicator.exchange": {
"total": 0.09417395400004125,
"count": 1,
"is_parallel": true,
"self": 0.09417395400004125
},
"steps_from_proto": {
"total": 0.001762049999967985,
"count": 1,
"is_parallel": true,
"self": 0.000384370000006129,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001377679999961856,
"count": 8,
"is_parallel": true,
"self": 0.001377679999961856
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1161.6548784429606,
"count": 63170,
"is_parallel": true,
"self": 34.377744162953604,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.206498968996016,
"count": 63170,
"is_parallel": true,
"self": 24.206498968996016
},
"communicator.exchange": {
"total": 1006.5051460910136,
"count": 63170,
"is_parallel": true,
"self": 1006.5051460910136
},
"steps_from_proto": {
"total": 96.5654892199974,
"count": 63170,
"is_parallel": true,
"self": 18.957319100169116,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.60817011982829,
"count": 505360,
"is_parallel": true,
"self": 77.60817011982829
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 613.3556300370437,
"count": 63171,
"self": 2.3698330500724296,
"children": {
"process_trajectory": {
"total": 115.98816142897124,
"count": 63171,
"self": 115.8128837979715,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17527763099974436,
"count": 2,
"self": 0.17527763099974436
}
}
},
"_update_policy": {
"total": 494.99763555800007,
"count": 443,
"self": 297.79790413098203,
"children": {
"TorchPPOOptimizer.update": {
"total": 197.19973142701804,
"count": 22740,
"self": 197.19973142701804
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4480001482297666e-06,
"count": 1,
"self": 1.4480001482297666e-06
},
"TrainerController._save_models": {
"total": 0.0816210860002684,
"count": 1,
"self": 0.0013908589999118703,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08023022700035654,
"count": 1,
"self": 0.08023022700035654
}
}
}
}
}
}
}