{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4038658142089844,
"min": 1.4038658142089844,
"max": 1.4268333911895752,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69317.28125,
"min": 69288.875,
"max": 75957.8125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 83.7542372881356,
"min": 79.23916532905297,
"max": 403.88709677419354,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49415.0,
"min": 49067.0,
"max": 50082.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999929.0,
"min": 49772.0,
"max": 1999929.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999929.0,
"min": 49772.0,
"max": 1999929.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4456441402435303,
"min": 0.04941123351454735,
"max": 2.481194496154785,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1442.9300537109375,
"min": 6.077581882476807,
"max": 1488.530029296875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.814109287928727,
"min": 1.7259405109940507,
"max": 3.944212555567706,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2250.3244798779488,
"min": 212.29068285226822,
"max": 2347.354266703129,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.814109287928727,
"min": 1.7259405109940507,
"max": 3.944212555567706,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2250.3244798779488,
"min": 212.29068285226822,
"max": 2347.354266703129,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01609654629117156,
"min": 0.013173334930615965,
"max": 0.018989478482399135,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04828963887351468,
"min": 0.02634666986123193,
"max": 0.05696843544719741,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05390241949094667,
"min": 0.022158219323803983,
"max": 0.06175407643119494,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16170725847284,
"min": 0.04431643864760797,
"max": 0.17534390973548095,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5509988163666686e-06,
"min": 3.5509988163666686e-06,
"max": 0.00029529067656977495,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0652996449100006e-05,
"min": 1.0652996449100006e-05,
"max": 0.00084375556874815,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118363333333331,
"min": 0.10118363333333331,
"max": 0.19843022500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30355089999999996,
"min": 0.20751740000000007,
"max": 0.5812518500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.906330333333337e-05,
"min": 6.906330333333337e-05,
"max": 0.004921668227500002,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020718991000000012,
"min": 0.00020718991000000012,
"max": 0.014064467314999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676223432",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676225956"
},
"total": 2523.5854788859997,
"count": 1,
"self": 0.5393221419994916,
"children": {
"run_training.setup": {
"total": 0.19215496099997154,
"count": 1,
"self": 0.19215496099997154
},
"TrainerController.start_learning": {
"total": 2522.854001783,
"count": 1,
"self": 4.418777423002666,
"children": {
"TrainerController._reset_env": {
"total": 11.40423907799999,
"count": 1,
"self": 11.40423907799999
},
"TrainerController.advance": {
"total": 2506.8506657559974,
"count": 232390,
"self": 4.899656671002049,
"children": {
"env_step": {
"total": 1963.087109864978,
"count": 232390,
"self": 1638.6365112299954,
"children": {
"SubprocessEnvManager._take_step": {
"total": 321.3767639690362,
"count": 232390,
"self": 17.026368506094514,
"children": {
"TorchPolicy.evaluate": {
"total": 304.35039546294166,
"count": 222878,
"self": 75.01748193184983,
"children": {
"TorchPolicy.sample_actions": {
"total": 229.33291353109183,
"count": 222878,
"self": 229.33291353109183
}
}
}
}
},
"workers": {
"total": 3.073834665946265,
"count": 232390,
"self": 0.0,
"children": {
"worker_root": {
"total": 2513.49857343499,
"count": 232390,
"is_parallel": true,
"self": 1184.1970566710029,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020728800000142655,
"count": 1,
"is_parallel": true,
"self": 0.0003468169999791826,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001726063000035083,
"count": 2,
"is_parallel": true,
"self": 0.001726063000035083
}
}
},
"UnityEnvironment.step": {
"total": 0.030471025999986523,
"count": 1,
"is_parallel": true,
"self": 0.00030274499999904947,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002172979999954805,
"count": 1,
"is_parallel": true,
"self": 0.0002172979999954805
},
"communicator.exchange": {
"total": 0.02921755500000245,
"count": 1,
"is_parallel": true,
"self": 0.02921755500000245
},
"steps_from_proto": {
"total": 0.0007334279999895443,
"count": 1,
"is_parallel": true,
"self": 0.00025379099992051124,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004796370000690331,
"count": 2,
"is_parallel": true,
"self": 0.0004796370000690331
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1329.301516763987,
"count": 232389,
"is_parallel": true,
"self": 39.72485708706199,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.82095554698475,
"count": 232389,
"is_parallel": true,
"self": 84.82095554698475
},
"communicator.exchange": {
"total": 1106.3269667719296,
"count": 232389,
"is_parallel": true,
"self": 1106.3269667719296
},
"steps_from_proto": {
"total": 98.42873735801061,
"count": 232389,
"is_parallel": true,
"self": 42.65141056587669,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.77732679213392,
"count": 464778,
"is_parallel": true,
"self": 55.77732679213392
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 538.8638992200174,
"count": 232390,
"self": 6.764144103045169,
"children": {
"process_trajectory": {
"total": 172.3237938299709,
"count": 232390,
"self": 171.06538085897114,
"children": {
"RLTrainer._checkpoint": {
"total": 1.258412970999757,
"count": 10,
"self": 1.258412970999757
}
}
},
"_update_policy": {
"total": 359.77596128700134,
"count": 97,
"self": 300.67963478401714,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.0963265029842,
"count": 2910,
"self": 59.0963265029842
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3509998098015785e-06,
"count": 1,
"self": 1.3509998098015785e-06
},
"TrainerController._save_models": {
"total": 0.18031817500013858,
"count": 1,
"self": 0.003204454000297119,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17711372099984146,
"count": 1,
"self": 0.17711372099984146
}
}
}
}
}
}
}