{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.405411720275879,
"min": 1.405411720275879,
"max": 1.4318861961364746,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72011.890625,
"min": 68791.96875,
"max": 77639.625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 112.210407239819,
"min": 101.67148760330579,
"max": 423.5546218487395,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49597.0,
"min": 48924.0,
"max": 50403.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999952.0,
"min": 49857.0,
"max": 1999952.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999952.0,
"min": 49857.0,
"max": 1999952.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.288228988647461,
"min": -0.05898061767220497,
"max": 2.371009111404419,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1011.397216796875,
"min": -6.959712982177734,
"max": 1122.308837890625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.481129947156388,
"min": 1.7820990666494532,
"max": 3.8842273863879115,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1538.6594366431236,
"min": 210.28768986463547,
"max": 1723.72651720047,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.481129947156388,
"min": 1.7820990666494532,
"max": 3.8842273863879115,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1538.6594366431236,
"min": 210.28768986463547,
"max": 1723.72651720047,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017952424590475857,
"min": 0.013406705137701161,
"max": 0.021753117905852076,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.035904849180951715,
"min": 0.026813410275402322,
"max": 0.061713808709949566,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04845151311407486,
"min": 0.021252846407393614,
"max": 0.06842205077409744,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09690302622814972,
"min": 0.04250569281478723,
"max": 0.20384134612977506,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.912998695699991e-06,
"min": 3.912998695699991e-06,
"max": 0.000295340626553125,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 7.825997391399982e-06,
"min": 7.825997391399982e-06,
"max": 0.0008441722686092499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1013043,
"min": 0.1013043,
"max": 0.19844687500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.2026086,
"min": 0.2026086,
"max": 0.5813907500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.508456999999989e-05,
"min": 7.508456999999989e-05,
"max": 0.0049224990625,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00015016913999999978,
"min": 0.00015016913999999978,
"max": 0.014071398425000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672172600",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672174795"
},
"total": 2194.805721719,
"count": 1,
"self": 0.3920503569997891,
"children": {
"run_training.setup": {
"total": 0.1284058059999893,
"count": 1,
"self": 0.1284058059999893
},
"TrainerController.start_learning": {
"total": 2194.285265556,
"count": 1,
"self": 3.7711830069774805,
"children": {
"TrainerController._reset_env": {
"total": 8.561112200000025,
"count": 1,
"self": 8.561112200000025
},
"TrainerController.advance": {
"total": 2181.8313167480223,
"count": 230946,
"self": 4.1609159069748785,
"children": {
"env_step": {
"total": 1719.9729042650097,
"count": 230946,
"self": 1440.9575609100905,
"children": {
"SubprocessEnvManager._take_step": {
"total": 276.4637706829729,
"count": 230946,
"self": 14.174676477982018,
"children": {
"TorchPolicy.evaluate": {
"total": 262.2890942049909,
"count": 223070,
"self": 65.9387494720666,
"children": {
"TorchPolicy.sample_actions": {
"total": 196.35034473292427,
"count": 223070,
"self": 196.35034473292427
}
}
}
}
},
"workers": {
"total": 2.551572671946303,
"count": 230946,
"self": 0.0,
"children": {
"worker_root": {
"total": 2186.627082882973,
"count": 230946,
"is_parallel": true,
"self": 1001.1281734879376,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002227395999966575,
"count": 1,
"is_parallel": true,
"self": 0.0003645439999786504,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018628519999879245,
"count": 2,
"is_parallel": true,
"self": 0.0018628519999879245
}
}
},
"UnityEnvironment.step": {
"total": 0.02664764200000036,
"count": 1,
"is_parallel": true,
"self": 0.00029310700000451106,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017173100002310093,
"count": 1,
"is_parallel": true,
"self": 0.00017173100002310093
},
"communicator.exchange": {
"total": 0.025483485999984623,
"count": 1,
"is_parallel": true,
"self": 0.025483485999984623
},
"steps_from_proto": {
"total": 0.0006993179999881249,
"count": 1,
"is_parallel": true,
"self": 0.00025564699996039053,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004436710000277344,
"count": 2,
"is_parallel": true,
"self": 0.0004436710000277344
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1185.4989093950353,
"count": 230945,
"is_parallel": true,
"self": 33.85120677004852,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 73.89714934805227,
"count": 230945,
"is_parallel": true,
"self": 73.89714934805227
},
"communicator.exchange": {
"total": 985.8102032129173,
"count": 230945,
"is_parallel": true,
"self": 985.8102032129173
},
"steps_from_proto": {
"total": 91.94035006401737,
"count": 230945,
"is_parallel": true,
"self": 37.52395877402478,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.41639128999259,
"count": 461890,
"is_parallel": true,
"self": 54.41639128999259
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 457.6974965760378,
"count": 230946,
"self": 6.26221248490765,
"children": {
"process_trajectory": {
"total": 140.8108965861298,
"count": 230946,
"self": 139.64107053912943,
"children": {
"RLTrainer._checkpoint": {
"total": 1.169826047000356,
"count": 10,
"self": 1.169826047000356
}
}
},
"_update_policy": {
"total": 310.62438750500036,
"count": 96,
"self": 257.81391564400235,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.81047186099801,
"count": 2880,
"self": 52.81047186099801
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.960002221807372e-07,
"count": 1,
"self": 7.960002221807372e-07
},
"TrainerController._save_models": {
"total": 0.12165280499993969,
"count": 1,
"self": 0.0025814989999162208,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11907130600002347,
"count": 1,
"self": 0.11907130600002347
}
}
}
}
}
}
}