{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.36324408650398254,
"min": 0.36263951659202576,
"max": 1.321979284286499,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10903.134765625,
"min": 10821.1630859375,
"max": 40103.5625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989932.0,
"min": 29952.0,
"max": 989932.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989932.0,
"min": 29952.0,
"max": 989932.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.48503291606903076,
"min": -0.26888924837112427,
"max": 0.488682359457016,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 132.89901733398438,
"min": -63.72675323486328,
"max": 132.89901733398438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0131208011880517,
"min": -0.08916240930557251,
"max": 0.6215468049049377,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.595099449157715,
"min": -24.163013458251953,
"max": 147.3065948486328,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07063328150761868,
"min": 0.06440895283407223,
"max": 0.07746857166291621,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9888659411066614,
"min": 0.4902344580017828,
"max": 1.0608330956778909,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01458585456269092,
"min": 0.0009905943323932377,
"max": 0.018198541327553268,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2042019638776729,
"min": 0.009222383719918134,
"max": 0.23667086330533496,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.360704689321429e-06,
"min": 7.360704689321429e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010304986565050001,
"min": 0.00010304986565050001,
"max": 0.0036327052890983,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245353571428571,
"min": 0.10245353571428571,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4343495,
"min": 1.3691136000000002,
"max": 2.6109017000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025510821785714285,
"min": 0.00025510821785714285,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035715150499999997,
"min": 0.0035715150499999997,
"max": 0.12110907983000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013991400599479675,
"min": 0.01376026589423418,
"max": 0.8307117819786072,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19587960839271545,
"min": 0.19264371693134308,
"max": 5.8149824142456055,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 388.64102564102564,
"min": 376.0379746835443,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30314.0,
"min": 15984.0,
"max": 33693.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4831102354786334,
"min": -1.0000000521540642,
"max": 1.5226708573253849,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 115.68259836733341,
"min": -32.000001668930054,
"max": 120.2909977287054,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4831102354786334,
"min": -1.0000000521540642,
"max": 1.5226708573253849,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 115.68259836733341,
"min": -32.000001668930054,
"max": 120.2909977287054,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.058142706270117715,
"min": 0.05765584434072177,
"max": 17.78527825511992,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.535131089069182,
"min": 3.854330282250885,
"max": 284.5644520819187,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683287829",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683290154"
},
"total": 2325.601415362,
"count": 1,
"self": 0.5388499529999535,
"children": {
"run_training.setup": {
"total": 0.06192000999999436,
"count": 1,
"self": 0.06192000999999436
},
"TrainerController.start_learning": {
"total": 2325.000645399,
"count": 1,
"self": 1.7763268789849462,
"children": {
"TrainerController._reset_env": {
"total": 5.375168252999856,
"count": 1,
"self": 5.375168252999856
},
"TrainerController.advance": {
"total": 2317.7536351380154,
"count": 63693,
"self": 1.7192676680028853,
"children": {
"env_step": {
"total": 1649.9523592339963,
"count": 63693,
"self": 1519.3762891180625,
"children": {
"SubprocessEnvManager._take_step": {
"total": 129.52106960588367,
"count": 63693,
"self": 5.544092186989019,
"children": {
"TorchPolicy.evaluate": {
"total": 123.97697741889465,
"count": 62548,
"self": 123.97697741889465
}
}
},
"workers": {
"total": 1.0550005100501494,
"count": 63693,
"self": 0.0,
"children": {
"worker_root": {
"total": 2319.0847151349526,
"count": 63693,
"is_parallel": true,
"self": 932.7682028139902,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0026921520000087185,
"count": 1,
"is_parallel": true,
"self": 0.0006244429996513645,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002067709000357354,
"count": 8,
"is_parallel": true,
"self": 0.002067709000357354
}
}
},
"UnityEnvironment.step": {
"total": 0.052274057000204266,
"count": 1,
"is_parallel": true,
"self": 0.0006162810002479091,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005096029999549501,
"count": 1,
"is_parallel": true,
"self": 0.0005096029999549501
},
"communicator.exchange": {
"total": 0.04747238599998127,
"count": 1,
"is_parallel": true,
"self": 0.04747238599998127
},
"steps_from_proto": {
"total": 0.0036757870000201365,
"count": 1,
"is_parallel": true,
"self": 0.002088824000338718,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015869629996814183,
"count": 8,
"is_parallel": true,
"self": 0.0015869629996814183
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1386.3165123209624,
"count": 63692,
"is_parallel": true,
"self": 34.539465346095994,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.103750257000684,
"count": 63692,
"is_parallel": true,
"self": 26.103750257000684
},
"communicator.exchange": {
"total": 1212.9642471269021,
"count": 63692,
"is_parallel": true,
"self": 1212.9642471269021
},
"steps_from_proto": {
"total": 112.70904959096356,
"count": 63692,
"is_parallel": true,
"self": 23.835787294889315,
"children": {
"_process_rank_one_or_two_observation": {
"total": 88.87326229607424,
"count": 509536,
"is_parallel": true,
"self": 88.87326229607424
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 666.0820082360162,
"count": 63693,
"self": 3.196570981982404,
"children": {
"process_trajectory": {
"total": 118.86490918003506,
"count": 63693,
"self": 118.63943903803533,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22547014199972182,
"count": 2,
"self": 0.22547014199972182
}
}
},
"_update_policy": {
"total": 544.0205280739988,
"count": 450,
"self": 351.8034543890485,
"children": {
"TorchPPOOptimizer.update": {
"total": 192.21707368495026,
"count": 22809,
"self": 192.21707368495026
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2399996194289997e-06,
"count": 1,
"self": 1.2399996194289997e-06
},
"TrainerController._save_models": {
"total": 0.09551388900035818,
"count": 1,
"self": 0.0014478570001301705,
"children": {
"RLTrainer._checkpoint": {
"total": 0.094066032000228,
"count": 1,
"self": 0.094066032000228
}
}
}
}
}
}
}