{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4040189981460571,
"min": 1.4040189981460571,
"max": 1.4268279075622559,
"count": 31
},
"Huggy.Policy.Entropy.sum": {
"value": 69767.109375,
"min": 68638.3828125,
"max": 75941.9375,
"count": 31
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.89493433395873,
"min": 80.9654605263158,
"max": 387.51162790697674,
"count": 31
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49513.0,
"min": 49227.0,
"max": 49989.0,
"count": 31
},
"Huggy.Step.mean": {
"value": 1549997.0,
"min": 49926.0,
"max": 1549997.0,
"count": 31
},
"Huggy.Step.sum": {
"value": 1549997.0,
"min": 49926.0,
"max": 1549997.0,
"count": 31
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4224023818969727,
"min": 0.01892710104584694,
"max": 2.475965976715088,
"count": 31
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1291.1405029296875,
"min": 2.422668933868408,
"max": 1490.8651123046875,
"count": 31
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6699236880771213,
"min": 1.90479666297324,
"max": 3.9034374294646494,
"count": 31
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1956.0693257451057,
"min": 243.81397286057472,
"max": 2357.225673496723,
"count": 31
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6699236880771213,
"min": 1.90479666297324,
"max": 3.9034374294646494,
"count": 31
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1956.0693257451057,
"min": 243.81397286057472,
"max": 2357.225673496723,
"count": 31
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01765583886622658,
"min": 0.012048271496132655,
"max": 0.020178400809203998,
"count": 31
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.052967516598679744,
"min": 0.02409654299226531,
"max": 0.060535202427612,
"count": 31
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0456760737631056,
"min": 0.021437638501326243,
"max": 0.059940358685950434,
"count": 31
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1370282212893168,
"min": 0.042875277002652486,
"max": 0.17843094989657404,
"count": 31
},
"Huggy.Policy.LearningRate.mean": {
"value": 7.137182620941666e-05,
"min": 7.137182620941666e-05,
"max": 0.00029530732656422496,
"count": 31
},
"Huggy.Policy.LearningRate.sum": {
"value": 0.00021411547862824995,
"min": 0.00015828479723844997,
"max": 0.0008438181187273,
"count": 31
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.12379058333333333,
"min": 0.12379058333333333,
"max": 0.198435775,
"count": 31
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.37137175,
"min": 0.25276155000000006,
"max": 0.5812727000000002,
"count": 31
},
"Huggy.Policy.Beta.mean": {
"value": 0.0011971501083333337,
"min": 0.0011971501083333337,
"max": 0.004921945172499999,
"count": 31
},
"Huggy.Policy.Beta.sum": {
"value": 0.0035914503250000012,
"min": 0.002652801345000001,
"max": 0.014065507730000001,
"count": 31
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 31
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 31
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1757694355",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1757696285"
},
"total": 1929.996441419,
"count": 1,
"self": 0.3412236849999317,
"children": {
"run_training.setup": {
"total": 0.025660714999958145,
"count": 1,
"self": 0.025660714999958145
},
"TrainerController.start_learning": {
"total": 1929.629557019,
"count": 1,
"self": 3.400590224108555,
"children": {
"TrainerController._reset_env": {
"total": 3.3706015610000577,
"count": 1,
"self": 3.3706015610000577
},
"TrainerController.advance": {
"total": 1922.7172068068915,
"count": 183530,
"self": 3.4513341638601105,
"children": {
"env_step": {
"total": 1552.9344393000613,
"count": 183530,
"self": 1233.640846484057,
"children": {
"SubprocessEnvManager._take_step": {
"total": 317.13290072195616,
"count": 183530,
"self": 12.578718728916101,
"children": {
"TorchPolicy.evaluate": {
"total": 304.55418199304006,
"count": 176258,
"self": 304.55418199304006
}
}
},
"workers": {
"total": 2.160692094048045,
"count": 183529,
"self": 0.0,
"children": {
"worker_root": {
"total": 1923.7909661350443,
"count": 183529,
"is_parallel": true,
"self": 918.0273041559898,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008735010000009424,
"count": 1,
"is_parallel": true,
"self": 0.0002793039999460234,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000594197000054919,
"count": 2,
"is_parallel": true,
"self": 0.000594197000054919
}
}
},
"UnityEnvironment.step": {
"total": 0.03484531100002641,
"count": 1,
"is_parallel": true,
"self": 0.00033346300006087404,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019502800000736897,
"count": 1,
"is_parallel": true,
"self": 0.00019502800000736897
},
"communicator.exchange": {
"total": 0.033590697000022374,
"count": 1,
"is_parallel": true,
"self": 0.033590697000022374
},
"steps_from_proto": {
"total": 0.0007261229999357965,
"count": 1,
"is_parallel": true,
"self": 0.00021993199993630697,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005061909999994896,
"count": 2,
"is_parallel": true,
"self": 0.0005061909999994896
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1005.7636619790545,
"count": 183528,
"is_parallel": true,
"self": 29.713323106092048,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 65.7808813969757,
"count": 183528,
"is_parallel": true,
"self": 65.7808813969757
},
"communicator.exchange": {
"total": 840.1819311039676,
"count": 183528,
"is_parallel": true,
"self": 840.1819311039676
},
"steps_from_proto": {
"total": 70.08752637201917,
"count": 183528,
"is_parallel": true,
"self": 26.025656902046194,
"children": {
"_process_rank_one_or_two_observation": {
"total": 44.061869469972976,
"count": 367056,
"is_parallel": true,
"self": 44.061869469972976
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 366.33143334297006,
"count": 183529,
"self": 5.246124651926493,
"children": {
"process_trajectory": {
"total": 121.90756542204565,
"count": 183529,
"self": 121.03147177704591,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8760936449997416,
"count": 7,
"self": 0.8760936449997416
}
}
},
"_update_policy": {
"total": 239.17774326899792,
"count": 76,
"self": 191.15551646500307,
"children": {
"TorchPPOOptimizer.update": {
"total": 48.02222680399484,
"count": 2280,
"self": 48.02222680399484
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.374000021314714e-06,
"count": 1,
"self": 1.374000021314714e-06
},
"TrainerController._save_models": {
"total": 0.14115705299991532,
"count": 1,
"self": 0.002236742999684793,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13892031000023053,
"count": 1,
"self": 0.13892031000023053
}
}
}
}
}
}
}