{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.400914192199707,
"min": 1.400914192199707,
"max": 1.4266990423202515,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69751.515625,
"min": 68349.6953125,
"max": 77391.1796875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 90.84972170686457,
"min": 79.1824,
"max": 397.944,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48968.0,
"min": 48968.0,
"max": 50171.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999986.0,
"min": 49773.0,
"max": 1999986.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999986.0,
"min": 49773.0,
"max": 1999986.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4022228717803955,
"min": 0.025720704346895218,
"max": 2.4663023948669434,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1294.798095703125,
"min": 3.292250156402588,
"max": 1511.661865234375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6462078499218973,
"min": 1.6746281345840544,
"max": 3.9951880241878728,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1965.3060311079025,
"min": 214.35240122675896,
"max": 2433.42251265049,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6462078499218973,
"min": 1.6746281345840544,
"max": 3.9951880241878728,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1965.3060311079025,
"min": 214.35240122675896,
"max": 2433.42251265049,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018311425098363542,
"min": 0.013626512342792316,
"max": 0.019621442680606074,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05493427529509063,
"min": 0.02725302468558463,
"max": 0.05540119710130967,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05584015448888143,
"min": 0.02231159492706259,
"max": 0.05758215716729562,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1675204634666443,
"min": 0.04462318985412518,
"max": 0.16904097323616346,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5596488134833324e-06,
"min": 3.5596488134833324e-06,
"max": 0.00029532577655807493,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0678946440449997e-05,
"min": 1.0678946440449997e-05,
"max": 0.0008441866686044499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118651666666667,
"min": 0.10118651666666667,
"max": 0.19844192500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30355955,
"min": 0.2075049,
"max": 0.58139555,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.920718166666668e-05,
"min": 6.920718166666668e-05,
"max": 0.004922252057500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020762154500000005,
"min": 0.00020762154500000005,
"max": 0.014071637945,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702563910",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702566580"
},
"total": 2670.071095662,
"count": 1,
"self": 0.44339682000008906,
"children": {
"run_training.setup": {
"total": 0.06576115100000379,
"count": 1,
"self": 0.06576115100000379
},
"TrainerController.start_learning": {
"total": 2669.561937691,
"count": 1,
"self": 5.126980892975553,
"children": {
"TrainerController._reset_env": {
"total": 3.5736519269999576,
"count": 1,
"self": 3.5736519269999576
},
"TrainerController.advance": {
"total": 2660.7406320320247,
"count": 232546,
"self": 5.311125611137868,
"children": {
"env_step": {
"total": 2161.6252660400323,
"count": 232546,
"self": 1792.7105437730825,
"children": {
"SubprocessEnvManager._take_step": {
"total": 365.64664974097707,
"count": 232546,
"self": 18.08808207299336,
"children": {
"TorchPolicy.evaluate": {
"total": 347.5585676679837,
"count": 222962,
"self": 347.5585676679837
}
}
},
"workers": {
"total": 3.268072525972798,
"count": 232546,
"self": 0.0,
"children": {
"worker_root": {
"total": 2661.4381706839845,
"count": 232546,
"is_parallel": true,
"self": 1205.9467230559715,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009704779999992752,
"count": 1,
"is_parallel": true,
"self": 0.00026317199996128693,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007073060000379883,
"count": 2,
"is_parallel": true,
"self": 0.0007073060000379883
}
}
},
"UnityEnvironment.step": {
"total": 0.03201263099998641,
"count": 1,
"is_parallel": true,
"self": 0.0003476399999158275,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002325270000369528,
"count": 1,
"is_parallel": true,
"self": 0.0002325270000369528
},
"communicator.exchange": {
"total": 0.030600460000016483,
"count": 1,
"is_parallel": true,
"self": 0.030600460000016483
},
"steps_from_proto": {
"total": 0.0008320040000171502,
"count": 1,
"is_parallel": true,
"self": 0.0002414909999970405,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005905130000201098,
"count": 2,
"is_parallel": true,
"self": 0.0005905130000201098
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1455.491447628013,
"count": 232545,
"is_parallel": true,
"self": 43.006675158951566,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 91.96556322598468,
"count": 232545,
"is_parallel": true,
"self": 91.96556322598468
},
"communicator.exchange": {
"total": 1221.10122807899,
"count": 232545,
"is_parallel": true,
"self": 1221.10122807899
},
"steps_from_proto": {
"total": 99.41798116408677,
"count": 232545,
"is_parallel": true,
"self": 36.668935655154826,
"children": {
"_process_rank_one_or_two_observation": {
"total": 62.74904550893194,
"count": 465090,
"is_parallel": true,
"self": 62.74904550893194
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 493.8042403808546,
"count": 232546,
"self": 8.027296678835228,
"children": {
"process_trajectory": {
"total": 168.27197827001896,
"count": 232546,
"self": 166.92299347501898,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3489847949999785,
"count": 10,
"self": 1.3489847949999785
}
}
},
"_update_policy": {
"total": 317.5049654320004,
"count": 97,
"self": 255.5844671879978,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.9204982440026,
"count": 2910,
"self": 61.9204982440026
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.620001269970089e-07,
"count": 1,
"self": 7.620001269970089e-07
},
"TrainerController._save_models": {
"total": 0.12067207699965365,
"count": 1,
"self": 0.0022316199992928887,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11844045700036077,
"count": 1,
"self": 0.11844045700036077
}
}
}
}
}
}
}