{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5585489273071289,
"min": 0.5516825914382935,
"max": 1.433143973350525,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16792.21484375,
"min": 16362.4990234375,
"max": 43475.85546875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989964.0,
"min": 29952.0,
"max": 989964.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989964.0,
"min": 29952.0,
"max": 989964.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2760472297668457,
"min": -0.10399471968412399,
"max": 0.2844479978084564,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 71.49623107910156,
"min": -24.95873260498047,
"max": 74.24092864990234,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.002693533431738615,
"min": 0.0005512988427653909,
"max": 0.4077441990375519,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.6976251602172852,
"min": 0.13837601244449615,
"max": 96.6353759765625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0701080114056822,
"min": 0.06452763149247723,
"max": 0.07518882900649933,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0516201710852329,
"min": 0.5263218030454953,
"max": 1.0516201710852329,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01172170010071972,
"min": 0.00013222160973721026,
"max": 0.012953916126069763,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17582550151079582,
"min": 0.0018511025363209435,
"max": 0.1813548257649767,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.550897483066671e-06,
"min": 7.550897483066671e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011326346224600007,
"min": 0.00011326346224600007,
"max": 0.003374960575013199,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251693333333334,
"min": 0.10251693333333334,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.537754,
"min": 1.3886848,
"max": 2.4850783,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002614416400000002,
"min": 0.0002614416400000002,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003921624600000003,
"min": 0.003921624600000003,
"max": 0.11251618132,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.016451720148324966,
"min": 0.016451720148324966,
"max": 0.5526224374771118,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2467758059501648,
"min": 0.237635537981987,
"max": 3.8683571815490723,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 585.0,
"min": 531.574074074074,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32760.0,
"min": 15984.0,
"max": 33022.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1622181549668311,
"min": -1.0000000521540642,
"max": 1.2461481243371964,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 63.921998523175716,
"min": -30.996601596474648,
"max": 67.2919987142086,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1622181549668311,
"min": -1.0000000521540642,
"max": 1.2461481243371964,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 63.921998523175716,
"min": -30.996601596474648,
"max": 67.2919987142086,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.09873100745661015,
"min": 0.09448294828379639,
"max": 11.09983042627573,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.430205410113558,
"min": 5.102079207325005,
"max": 177.59728682041168,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1727189735",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1727193096"
},
"total": 3361.743825507,
"count": 1,
"self": 0.6578675920009118,
"children": {
"run_training.setup": {
"total": 0.07461541999987276,
"count": 1,
"self": 0.07461541999987276
},
"TrainerController.start_learning": {
"total": 3361.0113424949996,
"count": 1,
"self": 2.5310426508949604,
"children": {
"TrainerController._reset_env": {
"total": 2.927751212999965,
"count": 1,
"self": 2.927751212999965
},
"TrainerController.advance": {
"total": 3355.470109205105,
"count": 63355,
"self": 2.59331382413302,
"children": {
"env_step": {
"total": 2173.3011444348886,
"count": 63355,
"self": 1999.6405767949873,
"children": {
"SubprocessEnvManager._take_step": {
"total": 172.17572298587265,
"count": 63355,
"self": 7.694169884903204,
"children": {
"TorchPolicy.evaluate": {
"total": 164.48155310096945,
"count": 62567,
"self": 164.48155310096945
}
}
},
"workers": {
"total": 1.4848446540286204,
"count": 63355,
"self": 0.0,
"children": {
"worker_root": {
"total": 3353.3496659049983,
"count": 63355,
"is_parallel": true,
"self": 1547.5990844539974,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025841260001016053,
"count": 1,
"is_parallel": true,
"self": 0.0008530700001756486,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017310559999259567,
"count": 8,
"is_parallel": true,
"self": 0.0017310559999259567
}
}
},
"UnityEnvironment.step": {
"total": 0.10892575599996235,
"count": 1,
"is_parallel": true,
"self": 0.0007799439999871538,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005323820000739943,
"count": 1,
"is_parallel": true,
"self": 0.0005323820000739943
},
"communicator.exchange": {
"total": 0.10545368799989774,
"count": 1,
"is_parallel": true,
"self": 0.10545368799989774
},
"steps_from_proto": {
"total": 0.0021597420000034617,
"count": 1,
"is_parallel": true,
"self": 0.0003908269998191827,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001768915000184279,
"count": 8,
"is_parallel": true,
"self": 0.001768915000184279
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1805.750581451001,
"count": 63354,
"is_parallel": true,
"self": 51.9025177649944,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 31.270352153946988,
"count": 63354,
"is_parallel": true,
"self": 31.270352153946988
},
"communicator.exchange": {
"total": 1591.9770203299545,
"count": 63354,
"is_parallel": true,
"self": 1591.9770203299545
},
"steps_from_proto": {
"total": 130.60069120210505,
"count": 63354,
"is_parallel": true,
"self": 28.673627834974923,
"children": {
"_process_rank_one_or_two_observation": {
"total": 101.92706336713013,
"count": 506832,
"is_parallel": true,
"self": 101.92706336713013
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1179.5756509460837,
"count": 63355,
"self": 4.788921217142843,
"children": {
"process_trajectory": {
"total": 173.948135474938,
"count": 63355,
"self": 173.7613137749379,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18682170000010956,
"count": 2,
"self": 0.18682170000010956
}
}
},
"_update_policy": {
"total": 1000.8385942540028,
"count": 449,
"self": 395.6732891570307,
"children": {
"TorchPPOOptimizer.update": {
"total": 605.1653050969721,
"count": 22770,
"self": 605.1653050969721
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0510002539376728e-06,
"count": 1,
"self": 1.0510002539376728e-06
},
"TrainerController._save_models": {
"total": 0.08243837499958317,
"count": 1,
"self": 0.0021922239993728,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08024615100021038,
"count": 1,
"self": 0.08024615100021038
}
}
}
}
}
}
}