{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.405710220336914,
"min": 1.405710220336914,
"max": 1.4263275861740112,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69966.4140625,
"min": 68330.421875,
"max": 75426.0859375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 107.18736383442265,
"min": 85.67764298093587,
"max": 424.16101694915255,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49199.0,
"min": 48882.0,
"max": 50051.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999933.0,
"min": 49808.0,
"max": 1999933.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999933.0,
"min": 49808.0,
"max": 1999933.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.367574691772461,
"min": 0.16920553147792816,
"max": 2.4795570373535156,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1086.716796875,
"min": 19.797046661376953,
"max": 1411.1474609375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5041888471782077,
"min": 2.0079374512036643,
"max": 3.9464882178357126,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1608.4226808547974,
"min": 234.9286817908287,
"max": 2261.588827729225,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5041888471782077,
"min": 2.0079374512036643,
"max": 3.9464882178357126,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1608.4226808547974,
"min": 234.9286817908287,
"max": 2261.588827729225,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016173494507650807,
"min": 0.014006655630914288,
"max": 0.01952426765023524,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04852048352295242,
"min": 0.02835403488716111,
"max": 0.058572802950705716,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.047835040175252495,
"min": 0.022441325864444174,
"max": 0.06231017721196015,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14350512052575748,
"min": 0.04488265172888835,
"max": 0.18176486181716123,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.180798939766669e-06,
"min": 3.180798939766669e-06,
"max": 0.00029532202655932493,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.542396819300007e-06,
"min": 9.542396819300007e-06,
"max": 0.0008440644186451998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10106023333333335,
"min": 0.10106023333333335,
"max": 0.19844067500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3031807,
"min": 0.20736899999999997,
"max": 0.5813548000000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.29056433333334e-05,
"min": 6.29056433333334e-05,
"max": 0.004922189682499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001887169300000002,
"min": 0.0001887169300000002,
"max": 0.01406960452,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673210237",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673212729"
},
"total": 2491.95964455,
"count": 1,
"self": 0.39238108300014574,
"children": {
"run_training.setup": {
"total": 0.130513034000046,
"count": 1,
"self": 0.130513034000046
},
"TrainerController.start_learning": {
"total": 2491.436750433,
"count": 1,
"self": 4.364100948987925,
"children": {
"TrainerController._reset_env": {
"total": 8.115697790000013,
"count": 1,
"self": 8.115697790000013
},
"TrainerController.advance": {
"total": 2478.8290878030116,
"count": 231891,
"self": 4.58859900414609,
"children": {
"env_step": {
"total": 1963.8838011999296,
"count": 231891,
"self": 1652.0529995459365,
"children": {
"SubprocessEnvManager._take_step": {
"total": 308.9170775301185,
"count": 231891,
"self": 15.775765976265916,
"children": {
"TorchPolicy.evaluate": {
"total": 293.1413115538526,
"count": 223014,
"self": 72.7815757858408,
"children": {
"TorchPolicy.sample_actions": {
"total": 220.35973576801177,
"count": 223014,
"self": 220.35973576801177
}
}
}
}
},
"workers": {
"total": 2.9137241238746583,
"count": 231891,
"self": 0.0,
"children": {
"worker_root": {
"total": 2482.779344342949,
"count": 231891,
"is_parallel": true,
"self": 1122.6038354740158,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021262930000602864,
"count": 1,
"is_parallel": true,
"self": 0.0003792629998997654,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001747030000160521,
"count": 2,
"is_parallel": true,
"self": 0.001747030000160521
}
}
},
"UnityEnvironment.step": {
"total": 0.03093041700003596,
"count": 1,
"is_parallel": true,
"self": 0.000323539000078199,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020755999992161378,
"count": 1,
"is_parallel": true,
"self": 0.00020755999992161378
},
"communicator.exchange": {
"total": 0.029629567000029056,
"count": 1,
"is_parallel": true,
"self": 0.029629567000029056
},
"steps_from_proto": {
"total": 0.0007697510000070906,
"count": 1,
"is_parallel": true,
"self": 0.00025738299996191927,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005123680000451714,
"count": 2,
"is_parallel": true,
"self": 0.0005123680000451714
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1360.1755088689333,
"count": 231890,
"is_parallel": true,
"self": 37.54601666491635,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 89.19720956403364,
"count": 231890,
"is_parallel": true,
"self": 89.19720956403364
},
"communicator.exchange": {
"total": 1129.4379577269895,
"count": 231890,
"is_parallel": true,
"self": 1129.4379577269895
},
"steps_from_proto": {
"total": 103.99432491299387,
"count": 231890,
"is_parallel": true,
"self": 45.44910446217705,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.54522045081683,
"count": 463780,
"is_parallel": true,
"self": 58.54522045081683
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 510.35668759893576,
"count": 231891,
"self": 6.799810051886425,
"children": {
"process_trajectory": {
"total": 165.40853974704999,
"count": 231891,
"self": 164.14229242905026,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2662473179997278,
"count": 10,
"self": 1.2662473179997278
}
}
},
"_update_policy": {
"total": 338.14833779999935,
"count": 97,
"self": 283.2754373900018,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.872900409997555,
"count": 2910,
"self": 54.872900409997555
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.180002962239087e-07,
"count": 1,
"self": 9.180002962239087e-07
},
"TrainerController._save_models": {
"total": 0.12786297299999205,
"count": 1,
"self": 0.0022603980000894808,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12560257499990257,
"count": 1,
"self": 0.12560257499990257
}
}
}
}
}
}
}