{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3997050523757935,
"min": 1.3997050523757935,
"max": 1.4295789003372192,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70757.890625,
"min": 68129.53125,
"max": 78855.265625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 83.37458193979933,
"min": 77.44444444444444,
"max": 383.46564885496184,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49858.0,
"min": 49083.0,
"max": 50234.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999828.0,
"min": 49878.0,
"max": 1999828.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999828.0,
"min": 49878.0,
"max": 1999828.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.462578296661377,
"min": 0.11477489769458771,
"max": 2.4833791255950928,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1475.0843505859375,
"min": 14.920736312866211,
"max": 1531.2698974609375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8189333798690313,
"min": 1.6982049889289417,
"max": 3.966277137875981,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2287.5410945415497,
"min": 220.7666485607624,
"max": 2387.3166424036026,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8189333798690313,
"min": 1.6982049889289417,
"max": 3.966277137875981,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2287.5410945415497,
"min": 220.7666485607624,
"max": 2387.3166424036026,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01624510536267836,
"min": 0.013395747907149296,
"max": 0.02078788613337868,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04873531608803508,
"min": 0.026791495814298592,
"max": 0.05591685092222179,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.061127318400475715,
"min": 0.02075640435020129,
"max": 0.061127318400475715,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18338195520142714,
"min": 0.04151280870040258,
"max": 0.18338195520142714,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.411198862966677e-06,
"min": 3.411198862966677e-06,
"max": 0.00029525805158065,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0233596588900031e-05,
"min": 1.0233596588900031e-05,
"max": 0.0008440398186534,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10113703333333335,
"min": 0.10113703333333335,
"max": 0.19841935,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30341110000000004,
"min": 0.20740985,
"max": 0.5813465999999998,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.673796333333351e-05,
"min": 6.673796333333351e-05,
"max": 0.004921125565,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020021389000000054,
"min": 0.00020021389000000054,
"max": 0.01406919534,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670828483",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670830581"
},
"total": 2098.237419065,
"count": 1,
"self": 0.3904298319994268,
"children": {
"run_training.setup": {
"total": 0.1034252250000236,
"count": 1,
"self": 0.1034252250000236
},
"TrainerController.start_learning": {
"total": 2097.7435640080002,
"count": 1,
"self": 3.577162288051568,
"children": {
"TrainerController._reset_env": {
"total": 9.996063476000018,
"count": 1,
"self": 9.996063476000018
},
"TrainerController.advance": {
"total": 2084.0608400999486,
"count": 232262,
"self": 3.8893068008633236,
"children": {
"env_step": {
"total": 1633.530572321065,
"count": 232262,
"self": 1368.1145719681122,
"children": {
"SubprocessEnvManager._take_step": {
"total": 262.9786327250879,
"count": 232262,
"self": 13.721940169015511,
"children": {
"TorchPolicy.evaluate": {
"total": 249.25669255607238,
"count": 222924,
"self": 63.39851148901562,
"children": {
"TorchPolicy.sample_actions": {
"total": 185.85818106705676,
"count": 222924,
"self": 185.85818106705676
}
}
}
}
},
"workers": {
"total": 2.437367627865001,
"count": 232262,
"self": 0.0,
"children": {
"worker_root": {
"total": 2090.2733724600666,
"count": 232262,
"is_parallel": true,
"self": 968.7250122300761,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00632018600003903,
"count": 1,
"is_parallel": true,
"self": 0.00030332200014981936,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00601686399988921,
"count": 2,
"is_parallel": true,
"self": 0.00601686399988921
}
}
},
"UnityEnvironment.step": {
"total": 0.027504378999992696,
"count": 1,
"is_parallel": true,
"self": 0.00028164200000446726,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018202399996880558,
"count": 1,
"is_parallel": true,
"self": 0.00018202399996880558
},
"communicator.exchange": {
"total": 0.026338361000057375,
"count": 1,
"is_parallel": true,
"self": 0.026338361000057375
},
"steps_from_proto": {
"total": 0.0007023519999620476,
"count": 1,
"is_parallel": true,
"self": 0.00022534799984441634,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004770040001176312,
"count": 2,
"is_parallel": true,
"self": 0.0004770040001176312
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1121.5483602299905,
"count": 232261,
"is_parallel": true,
"self": 33.89522671093164,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.02553594603432,
"count": 232261,
"is_parallel": true,
"self": 75.02553594603432
},
"communicator.exchange": {
"total": 922.7786124990549,
"count": 232261,
"is_parallel": true,
"self": 922.7786124990549
},
"steps_from_proto": {
"total": 89.84898507396952,
"count": 232261,
"is_parallel": true,
"self": 36.783976452974684,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.06500862099483,
"count": 464522,
"is_parallel": true,
"self": 53.06500862099483
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 446.6409609780202,
"count": 232262,
"self": 5.4995307699604155,
"children": {
"process_trajectory": {
"total": 139.10212572505873,
"count": 232262,
"self": 138.64483911305854,
"children": {
"RLTrainer._checkpoint": {
"total": 0.45728661200018905,
"count": 4,
"self": 0.45728661200018905
}
}
},
"_update_policy": {
"total": 302.03930448300105,
"count": 97,
"self": 248.84975772401526,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.189546758985784,
"count": 2910,
"self": 53.189546758985784
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.300001693191007e-07,
"count": 1,
"self": 9.300001693191007e-07
},
"TrainerController._save_models": {
"total": 0.10949721399992995,
"count": 1,
"self": 0.0024366069997086015,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10706060700022135,
"count": 1,
"self": 0.10706060700022135
}
}
}
}
}
}
}