{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3986024856567383,
"min": 1.3986024856567383,
"max": 1.425862193107605,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68944.109375,
"min": 68944.109375,
"max": 75728.9296875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 72.70353982300885,
"min": 70.9568345323741,
"max": 396.73809523809524,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49293.0,
"min": 49181.0,
"max": 49989.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999964.0,
"min": 49800.0,
"max": 1999964.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999964.0,
"min": 49800.0,
"max": 1999964.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4840199947357178,
"min": 0.12131837755441666,
"max": 2.541395664215088,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1684.16552734375,
"min": 15.164796829223633,
"max": 1714.0115966796875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.882423716045059,
"min": 1.8758215103149414,
"max": 4.024837925614956,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2632.28327947855,
"min": 234.47768878936768,
"max": 2698.2730723023415,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.882423716045059,
"min": 1.8758215103149414,
"max": 4.024837925614956,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2632.28327947855,
"min": 234.47768878936768,
"max": 2698.2730723023415,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016682259825756773,
"min": 0.013791527722949266,
"max": 0.020982715192561348,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05004677947727032,
"min": 0.029405966661215643,
"max": 0.058150927672735025,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06335273173948129,
"min": 0.022524434576431912,
"max": 0.0642025616640846,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.19005819521844386,
"min": 0.045048869152863824,
"max": 0.19005819521844386,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.88289870573333e-06,
"min": 3.88289870573333e-06,
"max": 0.00029533035155654994,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1648696117199991e-05,
"min": 1.1648696117199991e-05,
"max": 0.0008443521185493,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10129426666666669,
"min": 0.10129426666666669,
"max": 0.19844345000000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30388280000000006,
"min": 0.20772785000000002,
"max": 0.5814506999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.458390666666664e-05,
"min": 7.458390666666664e-05,
"max": 0.004922328155000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022375171999999995,
"min": 0.00022375171999999995,
"max": 0.014074389929999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673583467",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673585770"
},
"total": 2303.0973076339997,
"count": 1,
"self": 0.3900134039995464,
"children": {
"run_training.setup": {
"total": 0.11152892300003714,
"count": 1,
"self": 0.11152892300003714
},
"TrainerController.start_learning": {
"total": 2302.595765307,
"count": 1,
"self": 3.903947186010555,
"children": {
"TrainerController._reset_env": {
"total": 10.658087215000023,
"count": 1,
"self": 10.658087215000023
},
"TrainerController.advance": {
"total": 2287.912113024989,
"count": 233249,
"self": 4.406789371999821,
"children": {
"env_step": {
"total": 1794.1998434009756,
"count": 233249,
"self": 1506.9264141700014,
"children": {
"SubprocessEnvManager._take_step": {
"total": 284.5332574069214,
"count": 233249,
"self": 14.621639933935285,
"children": {
"TorchPolicy.evaluate": {
"total": 269.9116174729861,
"count": 222876,
"self": 67.5748598939739,
"children": {
"TorchPolicy.sample_actions": {
"total": 202.33675757901221,
"count": 222876,
"self": 202.33675757901221
}
}
}
}
},
"workers": {
"total": 2.7401718240528226,
"count": 233249,
"self": 0.0,
"children": {
"worker_root": {
"total": 2291.9924411620495,
"count": 233249,
"is_parallel": true,
"self": 1052.2084712731562,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00206223800000771,
"count": 1,
"is_parallel": true,
"self": 0.0003506039998910637,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017116340001166463,
"count": 2,
"is_parallel": true,
"self": 0.0017116340001166463
}
}
},
"UnityEnvironment.step": {
"total": 0.029960058000028766,
"count": 1,
"is_parallel": true,
"self": 0.0003025270001444369,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000187518999950953,
"count": 1,
"is_parallel": true,
"self": 0.000187518999950953
},
"communicator.exchange": {
"total": 0.02848418500002481,
"count": 1,
"is_parallel": true,
"self": 0.02848418500002481
},
"steps_from_proto": {
"total": 0.0009858269999085678,
"count": 1,
"is_parallel": true,
"self": 0.0004280160000007527,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005578109999078151,
"count": 2,
"is_parallel": true,
"self": 0.0005578109999078151
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1239.7839698888934,
"count": 233248,
"is_parallel": true,
"self": 36.081058744023494,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.34104932398452,
"count": 233248,
"is_parallel": true,
"self": 79.34104932398452
},
"communicator.exchange": {
"total": 1023.5471552629423,
"count": 233248,
"is_parallel": true,
"self": 1023.5471552629423
},
"steps_from_proto": {
"total": 100.81470655794305,
"count": 233248,
"is_parallel": true,
"self": 40.649325926730285,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.16538063121277,
"count": 466496,
"is_parallel": true,
"self": 60.16538063121277
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 489.3054802520139,
"count": 233249,
"self": 6.404714791018591,
"children": {
"process_trajectory": {
"total": 157.2357017609986,
"count": 233249,
"self": 156.0111779989993,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2245237619993077,
"count": 10,
"self": 1.2245237619993077
}
}
},
"_update_policy": {
"total": 325.6650636999967,
"count": 97,
"self": 271.0878858149896,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.57717788500713,
"count": 2910,
"self": 54.57717788500713
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.780004125379492e-07,
"count": 1,
"self": 7.780004125379492e-07
},
"TrainerController._save_models": {
"total": 0.1216171029996076,
"count": 1,
"self": 0.0020505329998741217,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11956656999973347,
"count": 1,
"self": 0.11956656999973347
}
}
}
}
}
}
}