{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4042648077011108,
"min": 1.4042648077011108,
"max": 1.4280095100402832,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68824.421875,
"min": 68531.0078125,
"max": 77645.46875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 95.20537428023033,
"min": 85.0,
"max": 391.38759689922483,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49602.0,
"min": 48739.0,
"max": 50489.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999446.0,
"min": 49868.0,
"max": 1999446.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999446.0,
"min": 49868.0,
"max": 1999446.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3058736324310303,
"min": 0.20548464357852936,
"max": 2.428972005844116,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1201.360107421875,
"min": 26.302034378051758,
"max": 1393.8668212890625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5613068776945234,
"min": 1.7601615749299526,
"max": 3.9191761400417806,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1855.4408832788467,
"min": 225.30068159103394,
"max": 2229.512532055378,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5613068776945234,
"min": 1.7601615749299526,
"max": 3.9191761400417806,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1855.4408832788467,
"min": 225.30068159103394,
"max": 2229.512532055378,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018612535969198995,
"min": 0.013268270209664479,
"max": 0.021246919226056586,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05583760790759698,
"min": 0.026536540419328958,
"max": 0.06034638877463294,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.050154065051012574,
"min": 0.02187526747584343,
"max": 0.056176849868562494,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15046219515303771,
"min": 0.04375053495168686,
"max": 0.16853054960568747,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.314748895116675e-06,
"min": 3.314748895116675e-06,
"max": 0.00029535435154855,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.944246685350026e-06,
"min": 9.944246685350026e-06,
"max": 0.00084403336865555,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10110488333333333,
"min": 0.10110488333333333,
"max": 0.19845145000000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30331464999999996,
"min": 0.20738300000000004,
"max": 0.5813444500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.513367833333347e-05,
"min": 6.513367833333347e-05,
"max": 0.004922727355,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019540103500000042,
"min": 0.00019540103500000042,
"max": 0.014069088054999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671456362",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671458472"
},
"total": 2109.3072609009996,
"count": 1,
"self": 0.3909499399997003,
"children": {
"run_training.setup": {
"total": 0.10386420000008911,
"count": 1,
"self": 0.10386420000008911
},
"TrainerController.start_learning": {
"total": 2108.8124467609996,
"count": 1,
"self": 3.4968703640688545,
"children": {
"TrainerController._reset_env": {
"total": 7.780549768000128,
"count": 1,
"self": 7.780549768000128
},
"TrainerController.advance": {
"total": 2097.419947112931,
"count": 231865,
"self": 3.8254660580864766,
"children": {
"env_step": {
"total": 1640.6263608679103,
"count": 231865,
"self": 1376.1940595639865,
"children": {
"SubprocessEnvManager._take_step": {
"total": 262.0423993729596,
"count": 231865,
"self": 13.515194320989849,
"children": {
"TorchPolicy.evaluate": {
"total": 248.52720505196976,
"count": 222874,
"self": 62.22556413095094,
"children": {
"TorchPolicy.sample_actions": {
"total": 186.30164092101882,
"count": 222874,
"self": 186.30164092101882
}
}
}
}
},
"workers": {
"total": 2.3899019309642426,
"count": 231865,
"self": 0.0,
"children": {
"worker_root": {
"total": 2101.432540631959,
"count": 231865,
"is_parallel": true,
"self": 968.8520673339672,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020469539999794506,
"count": 1,
"is_parallel": true,
"self": 0.0003134020000743476,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001733551999905103,
"count": 2,
"is_parallel": true,
"self": 0.001733551999905103
}
}
},
"UnityEnvironment.step": {
"total": 0.026357608000125765,
"count": 1,
"is_parallel": true,
"self": 0.0003071490000365884,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020050600005561137,
"count": 1,
"is_parallel": true,
"self": 0.00020050600005561137
},
"communicator.exchange": {
"total": 0.025126412999952663,
"count": 1,
"is_parallel": true,
"self": 0.025126412999952663
},
"steps_from_proto": {
"total": 0.0007235400000809022,
"count": 1,
"is_parallel": true,
"self": 0.00024741400011407677,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004761259999668255,
"count": 2,
"is_parallel": true,
"self": 0.0004761259999668255
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1132.5804732979918,
"count": 231864,
"is_parallel": true,
"self": 33.08569840402993,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 72.21248046007577,
"count": 231864,
"is_parallel": true,
"self": 72.21248046007577
},
"communicator.exchange": {
"total": 938.6310918818178,
"count": 231864,
"is_parallel": true,
"self": 938.6310918818178
},
"steps_from_proto": {
"total": 88.6512025520683,
"count": 231864,
"is_parallel": true,
"self": 36.42805684215455,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.22314570991375,
"count": 463728,
"is_parallel": true,
"self": 52.22314570991375
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 452.96812018693413,
"count": 231865,
"self": 5.646545165016505,
"children": {
"process_trajectory": {
"total": 140.29518736991713,
"count": 231865,
"self": 139.16039212591772,
"children": {
"RLTrainer._checkpoint": {
"total": 1.134795243999406,
"count": 10,
"self": 1.134795243999406
}
}
},
"_update_policy": {
"total": 307.0263876520005,
"count": 97,
"self": 254.7117518510047,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.3146358009958,
"count": 2910,
"self": 52.3146358009958
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.829997568682302e-07,
"count": 1,
"self": 8.829997568682302e-07
},
"TrainerController._save_models": {
"total": 0.11507863299993915,
"count": 1,
"self": 0.0020895499997095612,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11298908300022958,
"count": 1,
"self": 0.11298908300022958
}
}
}
}
}
}
}