{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.30228570103645325,
"min": 0.2922554016113281,
"max": 1.4273685216903687,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9121.7734375,
"min": 8795.71875,
"max": 43300.65234375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989893.0,
"min": 29952.0,
"max": 989893.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989893.0,
"min": 29952.0,
"max": 989893.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4993949830532074,
"min": -0.10080023854970932,
"max": 0.5302249193191528,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 137.3336181640625,
"min": -24.292858123779297,
"max": 145.84133911132812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.009546229615807533,
"min": 0.009546229615807533,
"max": 0.5653338432312012,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.625213146209717,
"min": 2.625213146209717,
"max": 136.24545288085938,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0704258401867254,
"min": 0.06564920250370744,
"max": 0.07353193825803742,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9859617626141556,
"min": 0.5011892835604597,
"max": 1.0805387482202302,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015240127936035592,
"min": 0.0007548253644344879,
"max": 0.031921972516491745,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2133617911044983,
"min": 0.009812729737648343,
"max": 0.23032031048690746,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.445776089535715e-06,
"min": 7.445776089535715e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010424086525350001,
"min": 0.00010424086525350001,
"max": 0.0035081915306028996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248189285714289,
"min": 0.10248189285714289,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347465000000004,
"min": 1.3886848,
"max": 2.5693971,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002579410964285715,
"min": 0.0002579410964285715,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003611175350000001,
"min": 0.003611175350000001,
"max": 0.11696277028999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0202634334564209,
"min": 0.020239263772964478,
"max": 0.8803380727767944,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2836880683898926,
"min": 0.2833496928215027,
"max": 6.1623663902282715,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 415.82191780821915,
"min": 354.5421686746988,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30355.0,
"min": 15984.0,
"max": 32694.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4745671083665874,
"min": -1.0000000521540642,
"max": 1.6029640869834485,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 107.64339891076088,
"min": -29.996201559901237,
"max": 131.8033990561962,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4745671083665874,
"min": -1.0000000521540642,
"max": 1.6029640869834485,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 107.64339891076088,
"min": -29.996201559901237,
"max": 131.8033990561962,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08695330872512996,
"min": 0.07410621169550591,
"max": 18.8011967074126,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 6.3475915369344875,
"min": 6.150815570726991,
"max": 300.8191473186016,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1757289923",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1757292048"
},
"total": 2124.3790936570003,
"count": 1,
"self": 0.47582563400055733,
"children": {
"run_training.setup": {
"total": 0.02222806899999341,
"count": 1,
"self": 0.02222806899999341
},
"TrainerController.start_learning": {
"total": 2123.881039954,
"count": 1,
"self": 1.2974695059738224,
"children": {
"TrainerController._reset_env": {
"total": 2.0182314719997976,
"count": 1,
"self": 2.0182314719997976
},
"TrainerController.advance": {
"total": 2120.482685789026,
"count": 63852,
"self": 1.300398795998717,
"children": {
"env_step": {
"total": 1481.3905273329713,
"count": 63852,
"self": 1339.373160293856,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.26178062111308,
"count": 63852,
"self": 4.428549548046703,
"children": {
"TorchPolicy.evaluate": {
"total": 136.83323107306637,
"count": 62544,
"self": 136.83323107306637
}
}
},
"workers": {
"total": 0.7555864180021672,
"count": 63852,
"self": 0.0,
"children": {
"worker_root": {
"total": 2119.1676637719515,
"count": 63852,
"is_parallel": true,
"self": 886.8468092649969,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017102399999657791,
"count": 1,
"is_parallel": true,
"self": 0.000574200000073688,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011360399998920911,
"count": 8,
"is_parallel": true,
"self": 0.0011360399998920911
}
}
},
"UnityEnvironment.step": {
"total": 0.04721835400005148,
"count": 1,
"is_parallel": true,
"self": 0.0005249560001630016,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047397799994541856,
"count": 1,
"is_parallel": true,
"self": 0.00047397799994541856
},
"communicator.exchange": {
"total": 0.04462082600002759,
"count": 1,
"is_parallel": true,
"self": 0.04462082600002759
},
"steps_from_proto": {
"total": 0.0015985939999154652,
"count": 1,
"is_parallel": true,
"self": 0.00034625199987203814,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001252342000043427,
"count": 8,
"is_parallel": true,
"self": 0.001252342000043427
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1232.3208545069547,
"count": 63851,
"is_parallel": true,
"self": 30.826882515025545,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.14953346591915,
"count": 63851,
"is_parallel": true,
"self": 22.14953346591915
},
"communicator.exchange": {
"total": 1085.6197129609957,
"count": 63851,
"is_parallel": true,
"self": 1085.6197129609957
},
"steps_from_proto": {
"total": 93.7247255650143,
"count": 63851,
"is_parallel": true,
"self": 18.540250841859688,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.18447472315461,
"count": 510808,
"is_parallel": true,
"self": 75.18447472315461
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 637.791759660056,
"count": 63852,
"self": 2.496235900069223,
"children": {
"process_trajectory": {
"total": 123.7634680299891,
"count": 63852,
"self": 123.5742518619893,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18921616799980256,
"count": 2,
"self": 0.18921616799980256
}
}
},
"_update_policy": {
"total": 511.53205572999764,
"count": 456,
"self": 285.78371227295634,
"children": {
"TorchPPOOptimizer.update": {
"total": 225.7483434570413,
"count": 22818,
"self": 225.7483434570413
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0069998097606003e-06,
"count": 1,
"self": 1.0069998097606003e-06
},
"TrainerController._save_models": {
"total": 0.08265217999996821,
"count": 1,
"self": 0.0013219829997979105,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0813301970001703,
"count": 1,
"self": 0.0813301970001703
}
}
}
}
}
}
}