ppo-doggy/run_logs/timers.json
María Navas Loro
Huggy
d10d27a
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4068101644515991,
"min": 1.4068101644515991,
"max": 1.42917001247406,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69065.9375,
"min": 68813.1875,
"max": 76109.8125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 105.72222222222223,
"min": 77.7968503937008,
"max": 372.53731343283584,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49478.0,
"min": 49116.0,
"max": 50023.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999275.0,
"min": 49423.0,
"max": 1999275.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999275.0,
"min": 49423.0,
"max": 1999275.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3740453720092773,
"min": 0.030546603724360466,
"max": 2.461883544921875,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1111.05322265625,
"min": 4.0626983642578125,
"max": 1500.545166015625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5831660396522946,
"min": 1.7740338038008912,
"max": 3.996669261645427,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1676.9217065572739,
"min": 235.94649590551853,
"max": 2313.2562918663025,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5831660396522946,
"min": 1.7740338038008912,
"max": 3.996669261645427,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1676.9217065572739,
"min": 235.94649590551853,
"max": 2313.2562918663025,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015568150865870696,
"min": 0.013219520504844696,
"max": 0.01936895029502921,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.046704452597612085,
"min": 0.02643904100968939,
"max": 0.05691865171975223,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04847045913338661,
"min": 0.021066746612389883,
"max": 0.07069078679713937,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14541137740015983,
"min": 0.042133493224779765,
"max": 0.21207236039141814,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.429148856983345e-06,
"min": 3.429148856983345e-06,
"max": 0.00029533372655542504,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0287446570950034e-05,
"min": 1.0287446570950034e-05,
"max": 0.0008441172186275999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10114301666666665,
"min": 0.10114301666666665,
"max": 0.19844457500000007,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30342905,
"min": 0.2074336,
"max": 0.5813724,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.703653166666684e-05,
"min": 6.703653166666684e-05,
"max": 0.0049223842925,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020110959500000054,
"min": 0.00020110959500000054,
"max": 0.01407048276,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670537812",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670540027"
},
"total": 2214.935405227,
"count": 1,
"self": 0.39603213300006246,
"children": {
"run_training.setup": {
"total": 0.16791496700000152,
"count": 1,
"self": 0.16791496700000152
},
"TrainerController.start_learning": {
"total": 2214.371458127,
"count": 1,
"self": 3.868461380999179,
"children": {
"TrainerController._reset_env": {
"total": 11.597598023999979,
"count": 1,
"self": 11.597598023999979
},
"TrainerController.advance": {
"total": 2198.7944016390006,
"count": 231809,
"self": 4.065903311010061,
"children": {
"env_step": {
"total": 1713.2886013829968,
"count": 231809,
"self": 1436.4817018010626,
"children": {
"SubprocessEnvManager._take_step": {
"total": 274.2388071719449,
"count": 231809,
"self": 14.310021192935551,
"children": {
"TorchPolicy.evaluate": {
"total": 259.9287859790094,
"count": 222983,
"self": 64.80908239298083,
"children": {
"TorchPolicy.sample_actions": {
"total": 195.11970358602855,
"count": 222983,
"self": 195.11970358602855
}
}
}
}
},
"workers": {
"total": 2.568092409989333,
"count": 231809,
"self": 0.0,
"children": {
"worker_root": {
"total": 2206.8896347330624,
"count": 231809,
"is_parallel": true,
"self": 1031.5844386020913,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019152559999611185,
"count": 1,
"is_parallel": true,
"self": 0.0003335719999313369,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015816840000297816,
"count": 2,
"is_parallel": true,
"self": 0.0015816840000297816
}
}
},
"UnityEnvironment.step": {
"total": 0.02753458199998704,
"count": 1,
"is_parallel": true,
"self": 0.00030914099994561184,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001833950000218465,
"count": 1,
"is_parallel": true,
"self": 0.0001833950000218465
},
"communicator.exchange": {
"total": 0.026288081000018337,
"count": 1,
"is_parallel": true,
"self": 0.026288081000018337
},
"steps_from_proto": {
"total": 0.0007539650000012443,
"count": 1,
"is_parallel": true,
"self": 0.00028100299999778144,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004729620000034629,
"count": 2,
"is_parallel": true,
"self": 0.0004729620000034629
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1175.3051961309711,
"count": 231808,
"is_parallel": true,
"self": 33.602493038035846,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 73.5837256320209,
"count": 231808,
"is_parallel": true,
"self": 73.5837256320209
},
"communicator.exchange": {
"total": 977.788849860994,
"count": 231808,
"is_parallel": true,
"self": 977.788849860994
},
"steps_from_proto": {
"total": 90.33012759992039,
"count": 231808,
"is_parallel": true,
"self": 37.364019125991376,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.96610847392901,
"count": 463616,
"is_parallel": true,
"self": 52.96610847392901
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 481.43989694499396,
"count": 231809,
"self": 6.151065992018573,
"children": {
"process_trajectory": {
"total": 148.72739266497558,
"count": 231809,
"self": 148.19646096997576,
"children": {
"RLTrainer._checkpoint": {
"total": 0.530931694999822,
"count": 4,
"self": 0.530931694999822
}
}
},
"_update_policy": {
"total": 326.5614382879998,
"count": 97,
"self": 272.00278166299927,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.55865662500054,
"count": 2910,
"self": 54.55865662500054
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.810002327663824e-07,
"count": 1,
"self": 8.810002327663824e-07
},
"TrainerController._save_models": {
"total": 0.11099620200002391,
"count": 1,
"self": 0.0019537130001481273,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10904248899987579,
"count": 1,
"self": 0.10904248899987579
}
}
}
}
}
}
}