{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.36157143115997314,
"min": 0.3592761158943176,
"max": 1.4545283317565918,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10887.638671875,
"min": 10812.7744140625,
"max": 44124.5703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989924.0,
"min": 29902.0,
"max": 989924.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989924.0,
"min": 29902.0,
"max": 989924.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5999639630317688,
"min": -0.13866865634918213,
"max": 0.6500394344329834,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 167.3899383544922,
"min": -33.00313949584961,
"max": 187.2113494873047,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03151479363441467,
"min": -0.01749282516539097,
"max": 0.30873727798461914,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 8.792627334594727,
"min": -5.037933826446533,
"max": 73.4794692993164,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06636647236744118,
"min": 0.06483748087789751,
"max": 0.07158446299514298,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9291306131441767,
"min": 0.4880446261633823,
"max": 1.047673749504611,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013954396296737418,
"min": 0.0009856462528282858,
"max": 0.015372815381342457,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19536154815432386,
"min": 0.013799047539596002,
"max": 0.2152194153387944,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.471126081085712e-06,
"min": 7.471126081085712e-06,
"max": 0.00029523505873117143,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010459576513519996,
"min": 0.00010459576513519996,
"max": 0.0037605448464851,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249034285714287,
"min": 0.10249034285714287,
"max": 0.19841168571428572,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348648000000002,
"min": 1.3888818,
"max": 2.6535149000000002,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025878525142857136,
"min": 0.00025878525142857136,
"max": 0.009841327402857142,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003622993519999999,
"min": 0.003622993519999999,
"max": 0.12536613851,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012087264098227024,
"min": 0.012087264098227024,
"max": 0.3987515866756439,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1692216992378235,
"min": 0.1692216992378235,
"max": 2.7912611961364746,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 293.01,
"min": 293.01,
"max": 989.6129032258065,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29301.0,
"min": 17084.0,
"max": 32538.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7072475015526951,
"min": -0.8566933840513229,
"max": 1.7072475015526951,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 172.4319976568222,
"min": -28.158801659941673,
"max": 176.37219781428576,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7072475015526951,
"min": -0.8566933840513229,
"max": 1.7072475015526951,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 172.4319976568222,
"min": -28.158801659941673,
"max": 176.37219781428576,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03669696707450808,
"min": 0.03669696707450808,
"max": 6.840406454685661,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.706393674525316,
"min": 3.706393674525316,
"max": 123.12731618434191,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739955867",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739958136"
},
"total": 2268.8213013179998,
"count": 1,
"self": 0.4771902179995777,
"children": {
"run_training.setup": {
"total": 0.01990708299990729,
"count": 1,
"self": 0.01990708299990729
},
"TrainerController.start_learning": {
"total": 2268.3242040170003,
"count": 1,
"self": 1.3362652679379607,
"children": {
"TrainerController._reset_env": {
"total": 2.0897173049997946,
"count": 1,
"self": 2.0897173049997946
},
"TrainerController.advance": {
"total": 2264.795264996063,
"count": 64061,
"self": 1.4185678959161123,
"children": {
"env_step": {
"total": 1594.7375583660119,
"count": 64061,
"self": 1442.8241301451485,
"children": {
"SubprocessEnvManager._take_step": {
"total": 151.1471358699314,
"count": 64061,
"self": 4.633315160974689,
"children": {
"TorchPolicy.evaluate": {
"total": 146.5138207089567,
"count": 62567,
"self": 146.5138207089567
}
}
},
"workers": {
"total": 0.7662923509319626,
"count": 64061,
"self": 0.0,
"children": {
"worker_root": {
"total": 2263.5375376559905,
"count": 64061,
"is_parallel": true,
"self": 932.7515159869636,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020783679997293802,
"count": 1,
"is_parallel": true,
"self": 0.000665024999761954,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014133429999674263,
"count": 8,
"is_parallel": true,
"self": 0.0014133429999674263
}
}
},
"UnityEnvironment.step": {
"total": 0.050326257000051555,
"count": 1,
"is_parallel": true,
"self": 0.0005306690004545089,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004982379996363306,
"count": 1,
"is_parallel": true,
"self": 0.0004982379996363306
},
"communicator.exchange": {
"total": 0.047714834999624145,
"count": 1,
"is_parallel": true,
"self": 0.047714834999624145
},
"steps_from_proto": {
"total": 0.0015825150003365707,
"count": 1,
"is_parallel": true,
"self": 0.00034737000032691867,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001235145000009652,
"count": 8,
"is_parallel": true,
"self": 0.001235145000009652
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1330.786021669027,
"count": 64060,
"is_parallel": true,
"self": 31.6866859140614,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.53674646094578,
"count": 64060,
"is_parallel": true,
"self": 23.53674646094578
},
"communicator.exchange": {
"total": 1178.6292775270367,
"count": 64060,
"is_parallel": true,
"self": 1178.6292775270367
},
"steps_from_proto": {
"total": 96.93331176698302,
"count": 64060,
"is_parallel": true,
"self": 19.333609221078405,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.59970254590462,
"count": 512480,
"is_parallel": true,
"self": 77.59970254590462
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 668.639138734135,
"count": 64061,
"self": 2.6484782451971114,
"children": {
"process_trajectory": {
"total": 129.4507896049372,
"count": 64061,
"self": 129.1384000429366,
"children": {
"RLTrainer._checkpoint": {
"total": 0.31238956200058965,
"count": 2,
"self": 0.31238956200058965
}
}
},
"_update_policy": {
"total": 536.5398708840007,
"count": 461,
"self": 294.53245716201127,
"children": {
"TorchPPOOptimizer.update": {
"total": 242.00741372198945,
"count": 22779,
"self": 242.00741372198945
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0309995559509844e-06,
"count": 1,
"self": 1.0309995559509844e-06
},
"TrainerController._save_models": {
"total": 0.10295541699997557,
"count": 1,
"self": 0.0019328200005475082,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10102259699942806,
"count": 1,
"self": 0.10102259699942806
}
}
}
}
}
}
}