{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.407739281654358,
"min": 1.407739281654358,
"max": 1.4287375211715698,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69401.546875,
"min": 69142.1484375,
"max": 77243.03125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 78.69426751592357,
"min": 73.14264487369985,
"max": 416.3833333333333,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49420.0,
"min": 48980.0,
"max": 50444.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999990.0,
"min": 49433.0,
"max": 1999990.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999990.0,
"min": 49433.0,
"max": 1999990.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4733822345733643,
"min": 0.030028635635972023,
"max": 2.504939317703247,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1553.2840576171875,
"min": 3.5734076499938965,
"max": 1676.968505859375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8664052945793053,
"min": 1.8855790395696623,
"max": 3.9842809721803434,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2428.102524995804,
"min": 224.38390570878983,
"max": 2538.094226539135,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8664052945793053,
"min": 1.8855790395696623,
"max": 3.9842809721803434,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2428.102524995804,
"min": 224.38390570878983,
"max": 2538.094226539135,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017164004715191875,
"min": 0.012240524147152125,
"max": 0.019906813182630382,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051492014145575624,
"min": 0.02448104829430425,
"max": 0.05864690324330392,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.055464791589313084,
"min": 0.02318651423686081,
"max": 0.0631645099984275,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16639437476793925,
"min": 0.04667707917590936,
"max": 0.1894935299952825,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7687487437833284e-06,
"min": 3.7687487437833284e-06,
"max": 0.000295255726581425,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1306246231349985e-05,
"min": 1.1306246231349985e-05,
"max": 0.0008438965687011502,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10125621666666666,
"min": 0.10125621666666666,
"max": 0.19841857500000007,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30376865,
"min": 0.20764085000000004,
"max": 0.58129885,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.268521166666662e-05,
"min": 7.268521166666662e-05,
"max": 0.0049210868925,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021805563499999983,
"min": 0.00021805563499999983,
"max": 0.014066812615000004,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673723537",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673725720"
},
"total": 2182.798464509,
"count": 1,
"self": 0.38888667800029,
"children": {
"run_training.setup": {
"total": 0.12135847599995486,
"count": 1,
"self": 0.12135847599995486
},
"TrainerController.start_learning": {
"total": 2182.288219355,
"count": 1,
"self": 3.749341576950883,
"children": {
"TrainerController._reset_env": {
"total": 10.115605587000005,
"count": 1,
"self": 10.115605587000005
},
"TrainerController.advance": {
"total": 2168.296639211049,
"count": 232965,
"self": 4.086169911054185,
"children": {
"env_step": {
"total": 1705.759141917002,
"count": 232965,
"self": 1432.9580211199877,
"children": {
"SubprocessEnvManager._take_step": {
"total": 270.2763297409517,
"count": 232965,
"self": 14.03661146193565,
"children": {
"TorchPolicy.evaluate": {
"total": 256.2397182790161,
"count": 222894,
"self": 65.49958332301696,
"children": {
"TorchPolicy.sample_actions": {
"total": 190.7401349559991,
"count": 222894,
"self": 190.7401349559991
}
}
}
}
},
"workers": {
"total": 2.524791056062327,
"count": 232965,
"self": 0.0,
"children": {
"worker_root": {
"total": 2173.4178950879527,
"count": 232965,
"is_parallel": true,
"self": 992.0438158450261,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005540362000033383,
"count": 1,
"is_parallel": true,
"self": 0.0003482160001340162,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005192145999899367,
"count": 2,
"is_parallel": true,
"self": 0.005192145999899367
}
}
},
"UnityEnvironment.step": {
"total": 0.02842696799996247,
"count": 1,
"is_parallel": true,
"self": 0.0003066769999122698,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020406200007982989,
"count": 1,
"is_parallel": true,
"self": 0.00020406200007982989
},
"communicator.exchange": {
"total": 0.026940417999981037,
"count": 1,
"is_parallel": true,
"self": 0.026940417999981037
},
"steps_from_proto": {
"total": 0.000975810999989335,
"count": 1,
"is_parallel": true,
"self": 0.0003026560000307654,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006731549999585695,
"count": 2,
"is_parallel": true,
"self": 0.0006731549999585695
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1181.3740792429267,
"count": 232964,
"is_parallel": true,
"self": 34.687032502987904,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.69763915989108,
"count": 232964,
"is_parallel": true,
"self": 77.69763915989108
},
"communicator.exchange": {
"total": 974.7998519650837,
"count": 232964,
"is_parallel": true,
"self": 974.7998519650837
},
"steps_from_proto": {
"total": 94.18955561496409,
"count": 232964,
"is_parallel": true,
"self": 39.4389524889466,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.750603126017495,
"count": 465928,
"is_parallel": true,
"self": 54.750603126017495
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 458.45132738299264,
"count": 232965,
"self": 5.3791340399826595,
"children": {
"process_trajectory": {
"total": 145.89643084700947,
"count": 232965,
"self": 144.57939405700904,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3170367900004294,
"count": 10,
"self": 1.3170367900004294
}
}
},
"_update_policy": {
"total": 307.1757624960005,
"count": 97,
"self": 254.66811320499676,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.50764929100376,
"count": 2910,
"self": 52.50764929100376
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0630001270328648e-06,
"count": 1,
"self": 1.0630001270328648e-06
},
"TrainerController._save_models": {
"total": 0.12663191699994059,
"count": 1,
"self": 0.002003543999762769,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12462837300017782,
"count": 1,
"self": 0.12462837300017782
}
}
}
}
}
}
}