{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4050170183181763,
"min": 1.4050170183181763,
"max": 1.426651954650879,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70291.59375,
"min": 68990.5078125,
"max": 76767.703125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 74.4524886877828,
"min": 72.83161004431315,
"max": 431.48275862068965,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49362.0,
"min": 49275.0,
"max": 50174.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999934.0,
"min": 49418.0,
"max": 1999934.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999934.0,
"min": 49418.0,
"max": 1999934.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.482393741607666,
"min": 0.04523396119475365,
"max": 2.5163261890411377,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1645.8270263671875,
"min": 5.201905727386475,
"max": 1652.642333984375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9005745437170405,
"min": 2.0109472803447557,
"max": 4.0343780570997385,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2586.080922484398,
"min": 231.2589372396469,
"max": 2589.1793364286423,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9005745437170405,
"min": 2.0109472803447557,
"max": 4.0343780570997385,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2586.080922484398,
"min": 231.2589372396469,
"max": 2589.1793364286423,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017136462246546417,
"min": 0.013975194874324692,
"max": 0.020324418674378345,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051409386739639254,
"min": 0.02864614243929585,
"max": 0.05902355649159291,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05940213927792178,
"min": 0.021886035241186617,
"max": 0.061489281989634034,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17820641783376534,
"min": 0.043772070482373235,
"max": 0.1822053258617719,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.813348728916668e-06,
"min": 3.813348728916668e-06,
"max": 0.00029531805156064994,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1440046186750004e-05,
"min": 1.1440046186750004e-05,
"max": 0.0008437927687357499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10127108333333334,
"min": 0.10127108333333334,
"max": 0.19843934999999996,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30381325000000003,
"min": 0.20767334999999998,
"max": 0.5812642499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.342705833333338e-05,
"min": 7.342705833333338e-05,
"max": 0.004922123565,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022028117500000013,
"min": 0.00022028117500000013,
"max": 0.014065086075,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671394844",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671397005"
},
"total": 2161.241137359,
"count": 1,
"self": 0.3856737759997486,
"children": {
"run_training.setup": {
"total": 0.10407238400000551,
"count": 1,
"self": 0.10407238400000551
},
"TrainerController.start_learning": {
"total": 2160.751391199,
"count": 1,
"self": 3.648774548024903,
"children": {
"TrainerController._reset_env": {
"total": 7.524637197000004,
"count": 1,
"self": 7.524637197000004
},
"TrainerController.advance": {
"total": 2149.4611318199745,
"count": 233192,
"self": 3.8762554740674204,
"children": {
"env_step": {
"total": 1687.9759280688968,
"count": 233192,
"self": 1418.5031001897878,
"children": {
"SubprocessEnvManager._take_step": {
"total": 266.9928251191177,
"count": 233192,
"self": 13.74408993898345,
"children": {
"TorchPolicy.evaluate": {
"total": 253.24873518013425,
"count": 222912,
"self": 63.788968412236045,
"children": {
"TorchPolicy.sample_actions": {
"total": 189.4597667678982,
"count": 222912,
"self": 189.4597667678982
}
}
}
}
},
"workers": {
"total": 2.48000275999118,
"count": 233192,
"self": 0.0,
"children": {
"worker_root": {
"total": 2153.032990063008,
"count": 233192,
"is_parallel": true,
"self": 980.6905790689841,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018505009999785216,
"count": 1,
"is_parallel": true,
"self": 0.00034405799999603914,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015064429999824824,
"count": 2,
"is_parallel": true,
"self": 0.0015064429999824824
}
}
},
"UnityEnvironment.step": {
"total": 0.026080995999961942,
"count": 1,
"is_parallel": true,
"self": 0.00025182499996390106,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019859700000779412,
"count": 1,
"is_parallel": true,
"self": 0.00019859700000779412
},
"communicator.exchange": {
"total": 0.024942209000016646,
"count": 1,
"is_parallel": true,
"self": 0.024942209000016646
},
"steps_from_proto": {
"total": 0.0006883649999736008,
"count": 1,
"is_parallel": true,
"self": 0.00023129999988213967,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004570650000914611,
"count": 2,
"is_parallel": true,
"self": 0.0004570650000914611
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1172.342410994024,
"count": 233191,
"is_parallel": true,
"self": 34.03595714301514,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.89827083195433,
"count": 233191,
"is_parallel": true,
"self": 74.89827083195433
},
"communicator.exchange": {
"total": 972.4616251330275,
"count": 233191,
"is_parallel": true,
"self": 972.4616251330275
},
"steps_from_proto": {
"total": 90.946557886027,
"count": 233191,
"is_parallel": true,
"self": 37.46099689316998,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.48556099285702,
"count": 466382,
"is_parallel": true,
"self": 53.48556099285702
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 457.60894827701014,
"count": 233192,
"self": 5.988911645124745,
"children": {
"process_trajectory": {
"total": 147.5534128158858,
"count": 233192,
"self": 146.41793711388522,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1354757020005763,
"count": 10,
"self": 1.1354757020005763
}
}
},
"_update_policy": {
"total": 304.0666238159996,
"count": 97,
"self": 251.89721985501103,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.169403960988575,
"count": 2910,
"self": 52.169403960988575
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.960000741353724e-07,
"count": 1,
"self": 8.960000741353724e-07
},
"TrainerController._save_models": {
"total": 0.11684673800027667,
"count": 1,
"self": 0.0028551100003824104,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11399162799989426,
"count": 1,
"self": 0.11399162799989426
}
}
}
}
}
}
}