{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4056875705718994,
"min": 1.4056875705718994,
"max": 1.4283348321914673,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69674.3125,
"min": 69269.3203125,
"max": 76241.9140625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 78.27777777777777,
"min": 70.58,
"max": 378.2651515151515,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49315.0,
"min": 49233.0,
"max": 49995.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999931.0,
"min": 49876.0,
"max": 1999931.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999931.0,
"min": 49876.0,
"max": 1999931.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.478210210800171,
"min": -0.04256509989500046,
"max": 2.530824899673462,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1561.2724609375,
"min": -5.576027870178223,
"max": 1699.666015625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.779028427317029,
"min": 1.7891313087849217,
"max": 4.041845715184775,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2380.7879092097282,
"min": 234.37620145082474,
"max": 2709.323141157627,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.779028427317029,
"min": 1.7891313087849217,
"max": 4.041845715184775,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2380.7879092097282,
"min": 234.37620145082474,
"max": 2709.323141157627,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016289886602276562,
"min": 0.012674351205593362,
"max": 0.019249122698480888,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04886965980682968,
"min": 0.025348702411186724,
"max": 0.05774736809544266,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06116734718283017,
"min": 0.023623546357784006,
"max": 0.06129620323578516,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1835020415484905,
"min": 0.04964270324756702,
"max": 0.1838886097073555,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.760648746483339e-06,
"min": 3.760648746483339e-06,
"max": 0.00029538225153925,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1281946239450018e-05,
"min": 1.1281946239450018e-05,
"max": 0.0008443524185492,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1012535166666667,
"min": 0.1012535166666667,
"max": 0.19846075000000007,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3037605500000001,
"min": 0.20763484999999998,
"max": 0.5814508,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.255048166666679e-05,
"min": 7.255048166666679e-05,
"max": 0.004923191425000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021765144500000036,
"min": 0.00021765144500000036,
"max": 0.01407439492,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681808013",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681810689"
},
"total": 2676.571551168,
"count": 1,
"self": 0.47922888199991576,
"children": {
"run_training.setup": {
"total": 0.20551382400003604,
"count": 1,
"self": 0.20551382400003604
},
"TrainerController.start_learning": {
"total": 2675.8868084620003,
"count": 1,
"self": 5.046203243889977,
"children": {
"TrainerController._reset_env": {
"total": 4.669607957999915,
"count": 1,
"self": 4.669607957999915
},
"TrainerController.advance": {
"total": 2666.0343385101105,
"count": 233061,
"self": 5.325675718122056,
"children": {
"env_step": {
"total": 2103.880492438011,
"count": 233061,
"self": 1784.7460509020752,
"children": {
"SubprocessEnvManager._take_step": {
"total": 315.8309967489655,
"count": 233061,
"self": 18.515733182038048,
"children": {
"TorchPolicy.evaluate": {
"total": 297.31526356692746,
"count": 222902,
"self": 297.31526356692746
}
}
},
"workers": {
"total": 3.303444786970431,
"count": 233061,
"self": 0.0,
"children": {
"worker_root": {
"total": 2666.818447831979,
"count": 233061,
"is_parallel": true,
"self": 1203.9287422680127,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011877749999484877,
"count": 1,
"is_parallel": true,
"self": 0.00034916200002044206,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008386129999280456,
"count": 2,
"is_parallel": true,
"self": 0.0008386129999280456
}
}
},
"UnityEnvironment.step": {
"total": 0.03430294999998296,
"count": 1,
"is_parallel": true,
"self": 0.00033229599989681446,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002490090000719647,
"count": 1,
"is_parallel": true,
"self": 0.0002490090000719647
},
"communicator.exchange": {
"total": 0.032986687000061465,
"count": 1,
"is_parallel": true,
"self": 0.032986687000061465
},
"steps_from_proto": {
"total": 0.0007349579999527123,
"count": 1,
"is_parallel": true,
"self": 0.00023925799996504793,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004956999999876643,
"count": 2,
"is_parallel": true,
"self": 0.0004956999999876643
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1462.8897055639663,
"count": 233060,
"is_parallel": true,
"self": 42.89674820605387,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 95.21306662894881,
"count": 233060,
"is_parallel": true,
"self": 95.21306662894881
},
"communicator.exchange": {
"total": 1223.1935461149283,
"count": 233060,
"is_parallel": true,
"self": 1223.1935461149283
},
"steps_from_proto": {
"total": 101.5863446140354,
"count": 233060,
"is_parallel": true,
"self": 41.07865791497886,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.507686699056535,
"count": 466120,
"is_parallel": true,
"self": 60.507686699056535
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 556.8281703539774,
"count": 233061,
"self": 7.192369943965218,
"children": {
"process_trajectory": {
"total": 153.36232952700846,
"count": 233061,
"self": 151.77577839000799,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5865511370004697,
"count": 10,
"self": 1.5865511370004697
}
}
},
"_update_policy": {
"total": 396.2734708830037,
"count": 97,
"self": 335.29471201399485,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.97875886900886,
"count": 2910,
"self": 60.97875886900886
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0349999683967326e-06,
"count": 1,
"self": 1.0349999683967326e-06
},
"TrainerController._save_models": {
"total": 0.1366577150001831,
"count": 1,
"self": 0.002838611000242963,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13381910399994013,
"count": 1,
"self": 0.13381910399994013
}
}
}
}
}
}
}