{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4004398584365845,
"min": 1.4004294872283936,
"max": 1.4284758567810059,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69449.2109375,
"min": 68717.7890625,
"max": 76949.359375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 82.15116279069767,
"min": 77.76947040498442,
"max": 397.21259842519686,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49455.0,
"min": 48821.0,
"max": 50446.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999999.0,
"min": 49832.0,
"max": 1999999.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999999.0,
"min": 49832.0,
"max": 1999999.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.43107008934021,
"min": 0.08492029458284378,
"max": 2.468428373336792,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1463.504150390625,
"min": 10.699956893920898,
"max": 1584.73095703125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.807217082906007,
"min": 1.7917715419852545,
"max": 3.953994063358733,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2291.944683909416,
"min": 225.76321429014206,
"max": 2486.9620457291603,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.807217082906007,
"min": 1.7917715419852545,
"max": 3.953994063358733,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2291.944683909416,
"min": 225.76321429014206,
"max": 2486.9620457291603,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014808003162033857,
"min": 0.013309571975939129,
"max": 0.019331553151338693,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04442400948610157,
"min": 0.028208918018693414,
"max": 0.05692567822989077,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05533055547210906,
"min": 0.022516703171034654,
"max": 0.06040185143550237,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16599166641632718,
"min": 0.04503340634206931,
"max": 0.18120555430650712,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.551298816266667e-06,
"min": 3.551298816266667e-06,
"max": 0.0002953503765498749,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0653896448800001e-05,
"min": 1.0653896448800001e-05,
"max": 0.0008439559686813499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118373333333336,
"min": 0.10118373333333336,
"max": 0.19845012500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035512000000001,
"min": 0.20755145000000003,
"max": 0.58131865,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.906829333333331e-05,
"min": 6.906829333333331e-05,
"max": 0.004922661237500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020720487999999996,
"min": 0.00020720487999999996,
"max": 0.014067800635000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675452425",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675454774"
},
"total": 2349.429469474,
"count": 1,
"self": 0.7505020100002184,
"children": {
"run_training.setup": {
"total": 0.10475321300009455,
"count": 1,
"self": 0.10475321300009455
},
"TrainerController.start_learning": {
"total": 2348.574214251,
"count": 1,
"self": 4.1429053399338045,
"children": {
"TrainerController._reset_env": {
"total": 10.674579998000013,
"count": 1,
"self": 10.674579998000013
},
"TrainerController.advance": {
"total": 2333.6367375370664,
"count": 232902,
"self": 4.365856489049747,
"children": {
"env_step": {
"total": 1824.280420731977,
"count": 232902,
"self": 1524.825424421057,
"children": {
"SubprocessEnvManager._take_step": {
"total": 296.62622484004737,
"count": 232902,
"self": 15.316994842100826,
"children": {
"TorchPolicy.evaluate": {
"total": 281.30922999794655,
"count": 222941,
"self": 69.70399485402947,
"children": {
"TorchPolicy.sample_actions": {
"total": 211.60523514391707,
"count": 222941,
"self": 211.60523514391707
}
}
}
}
},
"workers": {
"total": 2.8287714708727663,
"count": 232902,
"self": 0.0,
"children": {
"worker_root": {
"total": 2340.5775486009097,
"count": 232902,
"is_parallel": true,
"self": 1100.249561975918,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002098766000017349,
"count": 1,
"is_parallel": true,
"self": 0.0003239940000412389,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017747719999761102,
"count": 2,
"is_parallel": true,
"self": 0.0017747719999761102
}
}
},
"UnityEnvironment.step": {
"total": 0.026988974000005328,
"count": 1,
"is_parallel": true,
"self": 0.0002835999999888372,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002029849999871658,
"count": 1,
"is_parallel": true,
"self": 0.0002029849999871658
},
"communicator.exchange": {
"total": 0.025801354999998694,
"count": 1,
"is_parallel": true,
"self": 0.025801354999998694
},
"steps_from_proto": {
"total": 0.0007010340000306314,
"count": 1,
"is_parallel": true,
"self": 0.0002337800001441792,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004672539998864522,
"count": 2,
"is_parallel": true,
"self": 0.0004672539998864522
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1240.3279866249918,
"count": 232901,
"is_parallel": true,
"self": 34.113972710124926,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.89576945195267,
"count": 232901,
"is_parallel": true,
"self": 75.89576945195267
},
"communicator.exchange": {
"total": 1035.1666956468998,
"count": 232901,
"is_parallel": true,
"self": 1035.1666956468998
},
"steps_from_proto": {
"total": 95.1515488160145,
"count": 232901,
"is_parallel": true,
"self": 39.53147753492851,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.620071281085984,
"count": 465802,
"is_parallel": true,
"self": 55.620071281085984
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 504.99046031603973,
"count": 232902,
"self": 6.559583651035609,
"children": {
"process_trajectory": {
"total": 162.84209156800205,
"count": 232902,
"self": 161.59451832800175,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2475732400002926,
"count": 10,
"self": 1.2475732400002926
}
}
},
"_update_policy": {
"total": 335.5887850970021,
"count": 97,
"self": 280.917901817998,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.6708832790041,
"count": 2910,
"self": 54.6708832790041
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.030000001075678e-07,
"count": 1,
"self": 9.030000001075678e-07
},
"TrainerController._save_models": {
"total": 0.11999047299968879,
"count": 1,
"self": 0.0030195639997145918,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1169709089999742,
"count": 1,
"self": 0.1169709089999742
}
}
}
}
}
}
}