{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4229068458080292,
"min": 0.4206945598125458,
"max": 1.3808969259262085,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12653.373046875,
"min": 12653.373046875,
"max": 41890.890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989947.0,
"min": 29952.0,
"max": 989947.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989947.0,
"min": 29952.0,
"max": 989947.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4800989627838135,
"min": -0.11114373058080673,
"max": 0.4884772002696991,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 132.98741149902344,
"min": -26.7856388092041,
"max": 133.35427856445312,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.002241783542558551,
"min": -0.010561153292655945,
"max": 0.5379161834716797,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.620974063873291,
"min": -2.714216470718384,
"max": 127.48613739013672,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07037034384485305,
"min": 0.06437202470109259,
"max": 0.0731345800155061,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9851848138279428,
"min": 0.502732258066923,
"max": 1.0688624861182223,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01703332457597679,
"min": 0.0002493274566450033,
"max": 0.019140782018782977,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23846654406367507,
"min": 0.0027426020230950365,
"max": 0.23846654406367507,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.553090339478572e-06,
"min": 7.553090339478572e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010574326475270001,
"min": 0.00010574326475270001,
"max": 0.0031183045605652,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025176642857143,
"min": 0.1025176642857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352473000000001,
"min": 1.3886848,
"max": 2.4012577,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026151466214285716,
"min": 0.00026151466214285716,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036612052700000005,
"min": 0.0036612052700000005,
"max": 0.10395953652,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01295984536409378,
"min": 0.012820973061025143,
"max": 0.679510235786438,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18143783509731293,
"min": 0.17949362099170685,
"max": 4.7565717697143555,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 369.8024691358025,
"min": 369.8024691358025,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29954.0,
"min": 15984.0,
"max": 32702.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5561036778452955,
"min": -1.0000000521540642,
"max": 1.5561036778452955,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 126.04439790546894,
"min": -31.998401656746864,
"max": 126.04439790546894,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5561036778452955,
"min": -1.0000000521540642,
"max": 1.5561036778452955,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 126.04439790546894,
"min": -31.998401656746864,
"max": 126.04439790546894,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04989231763771206,
"min": 0.04989231763771206,
"max": 15.312699994072318,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.041277728654677,
"min": 3.7290663314051926,
"max": 245.0031999051571,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701506637",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701508798"
},
"total": 2160.814635838,
"count": 1,
"self": 0.47633285399979286,
"children": {
"run_training.setup": {
"total": 0.0564741690000119,
"count": 1,
"self": 0.0564741690000119
},
"TrainerController.start_learning": {
"total": 2160.281828815,
"count": 1,
"self": 1.293533785991258,
"children": {
"TrainerController._reset_env": {
"total": 3.0542734230000406,
"count": 1,
"self": 3.0542734230000406
},
"TrainerController.advance": {
"total": 2155.855418158009,
"count": 63608,
"self": 1.3700070660329402,
"children": {
"env_step": {
"total": 1528.711100438952,
"count": 63608,
"self": 1399.763529886891,
"children": {
"SubprocessEnvManager._take_step": {
"total": 128.12821340506707,
"count": 63608,
"self": 4.606953473115482,
"children": {
"TorchPolicy.evaluate": {
"total": 123.52125993195159,
"count": 62548,
"self": 123.52125993195159
}
}
},
"workers": {
"total": 0.8193571469937524,
"count": 63608,
"self": 0.0,
"children": {
"worker_root": {
"total": 2155.7607838490185,
"count": 63608,
"is_parallel": true,
"self": 870.8890439451261,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019685389997903258,
"count": 1,
"is_parallel": true,
"self": 0.0006750689997261361,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012934700000641897,
"count": 8,
"is_parallel": true,
"self": 0.0012934700000641897
}
}
},
"UnityEnvironment.step": {
"total": 0.04957847700006823,
"count": 1,
"is_parallel": true,
"self": 0.0005759410003065568,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005140240000400809,
"count": 1,
"is_parallel": true,
"self": 0.0005140240000400809
},
"communicator.exchange": {
"total": 0.046764202999838744,
"count": 1,
"is_parallel": true,
"self": 0.046764202999838744
},
"steps_from_proto": {
"total": 0.001724308999882851,
"count": 1,
"is_parallel": true,
"self": 0.0003653239994036994,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013589850004791515,
"count": 8,
"is_parallel": true,
"self": 0.0013589850004791515
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1284.8717399038924,
"count": 63607,
"is_parallel": true,
"self": 34.26148403886282,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.51589525399777,
"count": 63607,
"is_parallel": true,
"self": 23.51589525399777
},
"communicator.exchange": {
"total": 1130.4516078340107,
"count": 63607,
"is_parallel": true,
"self": 1130.4516078340107
},
"steps_from_proto": {
"total": 96.64275277702109,
"count": 63607,
"is_parallel": true,
"self": 19.114777276004588,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.5279755010165,
"count": 508856,
"is_parallel": true,
"self": 77.5279755010165
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 625.7743106530238,
"count": 63608,
"self": 2.5571394420107936,
"children": {
"process_trajectory": {
"total": 124.11025991301608,
"count": 63608,
"self": 123.94205077201582,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1682091410002613,
"count": 2,
"self": 0.1682091410002613
}
}
},
"_update_policy": {
"total": 499.10691129799693,
"count": 443,
"self": 297.90363428296814,
"children": {
"TorchPPOOptimizer.update": {
"total": 201.2032770150288,
"count": 22824,
"self": 201.2032770150288
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.570000318286475e-07,
"count": 1,
"self": 8.570000318286475e-07
},
"TrainerController._save_models": {
"total": 0.07860259100016265,
"count": 1,
"self": 0.0012719010001092101,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07733069000005344,
"count": 1,
"self": 0.07733069000005344
}
}
}
}
}
}
}